Compare commits


30 commits

Author SHA1 Message Date
MonsterDruide1 9879623bc9 Merge 18530d0bf6 into 35d93a7c41 2024-03-29 12:03:56 +01:00
Michael Scire 35d93a7c41 git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "fadec2981"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "fadec2981"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2024-03-29 03:20:12 -07:00
Michael Scire 410f23035e docs: update changelog for 1.7.0 2024-03-29 03:19:17 -07:00
Michael Scire 29cc13543a kern: fix using memory config for half-of-true-size 2024-03-29 03:18:20 -07:00
Michael Scire 31ad4eec1d git subrepo push emummc
subrepo:
  subdir:   "emummc"
  merged:   "832b24426"
upstream:
  origin:   "https://github.com/m4xw/emummc"
  branch:   "develop"
  commit:   "832b24426"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2024-03-29 02:57:53 -07:00
Michael Scire 3ccb0ae02b git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "8b85add71"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "8b85add71"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2024-03-29 02:56:10 -07:00
Michael Scire 4f7db6e60e docs: add changelog for 1.7.0 2024-03-29 02:54:40 -07:00
Michael Scire a325e18cb5 loader: add usb3 patches for 18.0.0 2024-03-29 02:41:14 -07:00
Michael Scire af41272591 spl: add support for new spl:es command 33 2024-03-29 02:41:14 -07:00
Michael Scire 551821e7e2 erpt: actually support non-sequential ids, nintendo why 2024-03-29 02:41:14 -07:00
Michael Scire b081762657 emummc: update for 18.0.0 2024-03-29 02:41:14 -07:00
Michael Scire d2c2a94c5e erpt: add new IDs/categories 2024-03-29 02:41:14 -07:00
Michael Scire 4ff9278d11 jpegdec: stop bundling (TODO post-prerelease) 2024-03-29 02:41:14 -07:00
Michael Scire 21c85c6a4f exo/fusee: apparently 18.0.0 did not burn a fuse 2024-03-29 02:41:14 -07:00
Michael Scire 05090005b7 svc: advertise support for 18.3.0.0 2024-03-29 02:41:14 -07:00
Michael Scire c0487ad384 kern: fix whoops in new page table logic 2024-03-29 02:41:14 -07:00
Michael Scire ecbe5cd406 kern: refactor smc helpers to share more common logic 2024-03-29 02:41:14 -07:00
Michael Scire 4fe139ea52 kern: return ExceptionType_UnalignedData on data abort caused by alignment fault 2024-03-29 02:41:14 -07:00
Michael Scire 6922eae3e7 kern: add KPageGroup::CopyRangeTo 2024-03-29 02:41:14 -07:00
Michael Scire 952188fc73 kern: implement new attr tracking for memory range/traversal context 2024-03-29 02:41:14 -07:00
Michael Scire c0a4fc30a8 kern: simplify size calculations in KPageTableBase::Read/WriteDebugIoMemory 2024-03-29 02:41:14 -07:00
Michael Scire 0b04c89a84 kern: pass properties directly to KPageTableBase::AllocateAndMapPagesImpl 2024-03-29 02:41:14 -07:00
Michael Scire 217dd1260a kern: take alignment argument in KMemoryManager::AllocateAndOpen 2024-03-29 02:41:14 -07:00
Michael Scire 8aa62a54d8 kern/os: support CreateProcessFlag_EnableAliasRegionExtraSize 2024-03-29 02:41:14 -07:00
Michael Scire 25bae14064 kern: revise KPageTableBase region layout logic to match 18.0.0 changes 2024-03-29 02:41:14 -07:00
Michael Scire 900913fe3b kern: fix longstanding bug in ConvertToKMemoryPermission 2024-03-29 02:41:14 -07:00
Michael Scire 7562f807fd kern: pass kernel base from KernelLdr to Kernel 2024-03-29 02:41:14 -07:00
Michael Scire cf5895e04f kern: use userspace access instructions to read from tlr 2024-03-29 02:41:14 -07:00
Michael Scire 1f37fbed1d fusee/exo/ams: update with new keydata/version enums 2024-03-29 02:41:14 -07:00
MonsterDruide1 18530d0bf6 dmnt.gen2: More logging in GDB 2024-03-12 13:24:07 +01:00
69 changed files with 1929 additions and 1266 deletions

@@ -84,7 +84,7 @@ dist-no-debug: package3 $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000034
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000036
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000037
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c
#mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000042
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000420
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000b240
@@ -98,7 +98,7 @@ dist-no-debug: package3 $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)
cp stratosphere/fatal/$(ATMOSPHERE_OUT_DIR)/fatal.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000034/exefs.nsp
cp stratosphere/creport/$(ATMOSPHERE_OUT_DIR)/creport.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000036/exefs.nsp
cp stratosphere/ro/$(ATMOSPHERE_OUT_DIR)/ro.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000037/exefs.nsp
cp stratosphere/jpegdec/$(ATMOSPHERE_OUT_DIR)/jpegdec.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c/exefs.nsp
#cp stratosphere/jpegdec/$(ATMOSPHERE_OUT_DIR)/jpegdec.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c/exefs.nsp
cp stratosphere/pgl/$(ATMOSPHERE_OUT_DIR)/pgl.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000042/exefs.nsp
cp stratosphere/LogManager/$(ATMOSPHERE_OUT_DIR)/LogManager.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000420/exefs.nsp
cp stratosphere/htc/$(ATMOSPHERE_OUT_DIR)/htc.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000b240/exefs.nsp

@@ -1,4 +1,26 @@
# Changelog
## 1.7.0
+ Basic support was added for 18.0.0.
  + The console should boot and atmosphère should be fully functional. However, not all modules have been fully updated to reflect the latest changes.
    + There shouldn't be anything user visible resulting from this, but it will be addressed in a future atmosphère update, once I am not traveling so much.
  + `exosphère` was updated to reflect the latest official secure monitor behavior.
  + `mesosphère` was updated to reflect the latest official kernel behavior.
  + `spl` was updated to reflect the latest official behavior.
+ `fusee` no longer supports applying IPS patches to KIPs.
  + The only KIPs that are ever present are a) atmosphère modules, b) custom system modules, or c) FS.
  + The IPS subsystem was originally designed to make nogc patches work for FS, but these are now internal, and it appears the only KIP patches that actually exist are for piracy.
    + I could not find any KIP patches posted anywhere made for any other purpose.
  + It fundamentally does not make sense to slow down boot for every normal user for a feature that has no actual use-case, especially when `fusee` seeks to be a minimal bootloader.
+ Minor improvements were made to atmosphere's gdbstub, including:
  + Support was added for QStartNoAckMode (see the example exchange below the changelog).
  + An issue was fixed that could cause a fatal error when creating too many breakpoints.
+ A number of minor issues were fixed and improvements were made, including:
  + `pt-BR` (`PortugueseBr`) is now accepted as a valid language when overriding game locales.
  + A bug was fixed that could cause atmosphere to incorrectly serialize output object IDs over IPC when using domain objects.
  + A bug was fixed in `pm`'s resource limit boost logic that could potentially cause legitimate boosts to fail in certain circumstances.
  + `loader`/`ro` will now throw a fatal error when an invalid IPS patch would write out of bounds, instead of corrupting memory (see the bounds-check sketch below the changelog).
  + Support was fixed for booting using a memory configuration of half of the true available memory (e.g. forcing a 4GB configuration on an 8GB board).
+ General system stability improvements to enhance the user's experience.
## 1.6.2
+ Support was finished for 17.0.0.
  + `erpt` was updated to support the latest official behavior.
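
For context on the gdbstub bullet above: QStartNoAckMode is the stock GDB remote-serial-protocol request that switches off the per-packet `+`/`-` transport acknowledgements once the link is considered reliable. The exchange looks like this (checksums are the payload bytes summed mod 256, in hex):

-> $QStartNoAckMode#b0
<- +                       (last ack sent in the old mode)
<- $OK#9a                  (from here on, no +/- acks in either direction)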

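The `loader`/`ro` hardening noted above boils down to validating each IPS record against the patch target before applying it. A minimal sketch of such a check, assuming the standard IPS record layout (illustrative only, not Atmosphere's actual implementation):

/* An IPS record is a 3-byte big-endian offset plus a 2-byte size;   */
/* size == 0 marks an RLE record whose 2-byte run length follows.    */
bool IsIpsRecordInBounds(const u8 *rec, size_t target_size) {
    const size_t offset = (size_t(rec[0]) << 16) | (size_t(rec[1]) << 8) | size_t(rec[2]);
    size_t size = (size_t(rec[3]) << 8) | size_t(rec[4]);
    if (size == 0) {
        size = (size_t(rec[5]) << 8) | size_t(rec[6]);
    }
    /* A record that fails this check used to corrupt memory; it is now fatal. */
    return offset + size <= target_size;
}
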
emummc/.gitrepo
@@ -6,7 +6,7 @@
[subrepo]
remote = https://github.com/m4xw/emummc
branch = develop
commit = 9513a5412057b1f1bc44ed8e717c57c726763a88
parent = e4d08ae0c5342cdb0875d164522a63ec9d233052
commit = 832b2442685b45b086697ffe09c5fde05d7444e9
parent = 3ccb0ae02bc06769f44d61233bf177301ba9d5f3
method = merge
cmdver = 0.4.1

emummc/README.md
@@ -2,7 +2,7 @@
*A SDMMC driver replacement for Nintendo's Filesystem Services, by **m4xw***
### Supported Horizon Versions
**1.0.0 - 17.0.0**
**1.0.0 - 18.0.0**
## Features
* Arbitrary SDMMC backend selection

@@ -69,6 +69,8 @@
#include "offsets/1603_exfat.h"
#include "offsets/1700.h"
#include "offsets/1700_exfat.h"
#include "offsets/1800.h"
#include "offsets/1800_exfat.h"
#include "../utils/fatal.h"
#define GET_OFFSET_STRUCT_NAME(vers) g_offsets##vers
@@ -149,6 +151,8 @@ DEFINE_OFFSET_STRUCT(_1603);
DEFINE_OFFSET_STRUCT(_1603_EXFAT);
DEFINE_OFFSET_STRUCT(_1700);
DEFINE_OFFSET_STRUCT(_1700_EXFAT);
DEFINE_OFFSET_STRUCT(_1800);
DEFINE_OFFSET_STRUCT(_1800_EXFAT);
const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
switch (version) {
@@ -258,6 +262,10 @@ const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
return &(GET_OFFSET_STRUCT_NAME(_1700));
case FS_VER_17_0_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1700_EXFAT));
case FS_VER_18_0_0:
return &(GET_OFFSET_STRUCT_NAME(_1800));
case FS_VER_18_0_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1800_EXFAT));
default:
fatal_abort(Fatal_UnknownVersion);
}

@@ -101,6 +101,9 @@ enum FS_VER
FS_VER_17_0_0,
FS_VER_17_0_0_EXFAT,
FS_VER_18_0_0,
FS_VER_18_0_0_EXFAT,
FS_VER_MAX,
};

emummc/source/FS/offsets/1800.h (new file)
@@ -0,0 +1,59 @@
/*
* Copyright (c) 2019 m4xw <m4x@m4xw.net>
* Copyright (c) 2019 Atmosphere-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __FS_1800_H__
#define __FS_1800_H__
// Accessor vtable getters
#define FS_OFFSET_1800_SDMMC_ACCESSOR_GC 0x18AB00
#define FS_OFFSET_1800_SDMMC_ACCESSOR_SD 0x18C800
#define FS_OFFSET_1800_SDMMC_ACCESSOR_NAND 0x18AFE0
// Hooks
#define FS_OFFSET_1800_SDMMC_WRAPPER_READ 0x186A50
#define FS_OFFSET_1800_SDMMC_WRAPPER_WRITE 0x186AB0
#define FS_OFFSET_1800_RTLD 0x2A3A4
#define FS_OFFSET_1800_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x44)))
#define FS_OFFSET_1800_CLKRST_SET_MIN_V_CLK_RATE 0x1A77D0
// Misc funcs
#define FS_OFFSET_1800_LOCK_MUTEX 0x17FCC0
#define FS_OFFSET_1800_UNLOCK_MUTEX 0x17FD10
#define FS_OFFSET_1800_SDMMC_WRAPPER_CONTROLLER_OPEN 0x186A10
#define FS_OFFSET_1800_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x186A30
// Misc Data
#define FS_OFFSET_1800_SD_MUTEX 0xFD13F0
#define FS_OFFSET_1800_NAND_MUTEX 0xFCCB28
#define FS_OFFSET_1800_ACTIVE_PARTITION 0xFCCB68
#define FS_OFFSET_1800_SDMMC_DAS_HANDLE 0xFB1950
// NOPs
#define FS_OFFSET_1800_SD_DAS_INIT 0x28F24
// Nintendo Paths
#define FS_OFFSET_1800_NINTENDO_PATHS \
{ \
{.opcode_reg = 3, .adrp_offset = 0x00068B08, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 3, .adrp_offset = 0x000758DC, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0007C77C, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x000905C4, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1800_H__

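As I read the `NINTENDO_PATHS` entries above, each one locates an `adrp`+`add` pair in FS that materializes the address of a Nintendo path string: `opcode_reg` is the pair's destination register, `adrp_offset` locates the `adrp`, and `add_rel_offset` is the distance to its matching `add`; retargeting the pair changes where FS believes the directory lives. For reference, the address such a pair computes (a sketch, not emummc code):

/* adrp yields a 4 KiB-aligned page address from a PC-relative immediate; */
/* the following add contributes the low 12 bits.                         */
constexpr u64 MaterializeAdrpAdd(u64 adrp_pc, s64 page_delta, u64 low12) {
    return (adrp_pc & ~u64(0xFFF)) + u64(page_delta) * 0x1000 + low12;
}
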
emummc/source/FS/offsets/1800_exfat.h (new file)
@@ -0,0 +1,59 @@
/*
* Copyright (c) 2019 m4xw <m4x@m4xw.net>
* Copyright (c) 2019 Atmosphere-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __FS_1800_EXFAT_H__
#define __FS_1800_EXFAT_H__
// Accessor vtable getters
#define FS_OFFSET_1800_EXFAT_SDMMC_ACCESSOR_GC 0x195B90
#define FS_OFFSET_1800_EXFAT_SDMMC_ACCESSOR_SD 0x197890
#define FS_OFFSET_1800_EXFAT_SDMMC_ACCESSOR_NAND 0x196070
// Hooks
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_READ 0x191AE0
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_WRITE 0x191B40
#define FS_OFFSET_1800_EXFAT_RTLD 0x2A3A4
#define FS_OFFSET_1800_EXFAT_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x44)))
#define FS_OFFSET_1800_EXFAT_CLKRST_SET_MIN_V_CLK_RATE 0x1B2860
// Misc funcs
#define FS_OFFSET_1800_EXFAT_LOCK_MUTEX 0x18AD50
#define FS_OFFSET_1800_EXFAT_UNLOCK_MUTEX 0x18ADA0
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x191AA0
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x191AC0
// Misc Data
#define FS_OFFSET_1800_EXFAT_SD_MUTEX 0xFE33F0
#define FS_OFFSET_1800_EXFAT_NAND_MUTEX 0xFDEB28
#define FS_OFFSET_1800_EXFAT_ACTIVE_PARTITION 0xFDEB68
#define FS_OFFSET_1800_EXFAT_SDMMC_DAS_HANDLE 0xFBE950
// NOPs
#define FS_OFFSET_1800_EXFAT_SD_DAS_INIT 0x28F24
// Nintendo Paths
#define FS_OFFSET_1800_EXFAT_NINTENDO_PATHS \
{ \
{.opcode_reg = 3, .adrp_offset = 0x00068B08, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 3, .adrp_offset = 0x000758DC, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0007C77C, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x000905C4, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1800_EXFAT_H__

@@ -85,10 +85,10 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
/* We can get away with only including latest because exosphere supports newer-than-expected master key in engine. */
/* TODO: Update on next change of keys. */
/* Mariko Development Master Kek Source. */
.byte 0x43, 0xDB, 0x9D, 0x88, 0xDB, 0x38, 0xE9, 0xBF, 0x3D, 0xD7, 0x83, 0x39, 0xEF, 0xB1, 0x4F, 0xA7
.byte 0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED
/* Mariko Production Master Kek Source. */
.byte 0x8D, 0xEE, 0x9E, 0x11, 0x36, 0x3A, 0x9B, 0x0A, 0x6A, 0xC7, 0xBB, 0xE9, 0xD1, 0x03, 0xF7, 0x80
.byte 0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2
/* Development Master Key Vectors. */
.byte 0x46, 0x22, 0xB4, 0x51, 0x9A, 0x7E, 0xA7, 0x7F, 0x62, 0xA1, 0x1F, 0x8F, 0xC5, 0x3A, 0xDB, 0xFE /* Zeroes encrypted with Master Key 00. */
@@ -108,6 +108,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xD6, 0x80, 0x98, 0xC0, 0xFA, 0xC7, 0x13, 0xCB, 0x93, 0xD2, 0x0B, 0x82, 0x4C, 0xA1, 0x7B, 0x8D /* Master key 0D encrypted with Master key 0E. */
.byte 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 /* Master key 0E encrypted with Master key 0F. */
.byte 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 /* Master key 0F encrypted with Master key 10. */
.byte 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 /* Master key 10 encrypted with Master key 11. */
/* Production Master Key Vectors. */
.byte 0x0C, 0xF0, 0x59, 0xAC, 0x85, 0xF6, 0x26, 0x65, 0xE1, 0xE9, 0x19, 0x55, 0xE6, 0xF2, 0x67, 0x3D /* Zeroes encrypted with Master Key 00. */
@@ -127,6 +128,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xB1, 0x81, 0xA6, 0x0D, 0x72, 0xC7, 0xEE, 0x15, 0x21, 0xF3, 0xC0, 0xB5, 0x6B, 0x61, 0x6D, 0xE7 /* Master key 0D encrypted with Master key 0E. */
.byte 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 /* Master key 0E encrypted with Master key 0F. */
.byte 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD /* Master key 0F encrypted with Master key 10. */
.byte 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 /* Master key 10 encrypted with Master key 11. */
/* Device Master Key Source Sources. */
.byte 0x8B, 0x4E, 0x1C, 0x22, 0x42, 0x07, 0xC8, 0x73, 0x56, 0x94, 0x08, 0x8B, 0xCC, 0x47, 0x0F, 0x5D /* 4.0.0 Device Master Key Source Source. */
@@ -143,6 +145,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x5E, 0xC9, 0xC5, 0x0A, 0xD0, 0x5F, 0x8B, 0x7B, 0xA7, 0x39, 0xEA, 0xBC, 0x60, 0x0F, 0x74, 0xE6 /* 15.0.0 Device Master Key Source Source. */
.byte 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C /* 16.0.0 Device Master Key Source Source. */
.byte 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 /* 17.0.0 Device Master Key Source Source. */
.byte 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 /* 18.0.0 Device Master Key Source Source. */
/* Development Device Master Kek Sources. */
.byte 0xD6, 0xBD, 0x9F, 0xC6, 0x18, 0x09, 0xE1, 0x96, 0x20, 0x39, 0x60, 0xD2, 0x89, 0x83, 0x31, 0x34 /* 4.0.0 Device Master Kek Source. */
@@ -159,6 +162,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xAE, 0x05, 0x48, 0x65, 0xAB, 0x17, 0x9D, 0x3D, 0x51, 0xB7, 0x56, 0xBD, 0x9B, 0x0B, 0x5B, 0x6E /* 15.0.0 Device Master Kek Source. */
.byte 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F /* 16.0.0 Device Master Kek Source. */
.byte 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 /* 17.0.0 Device Master Kek Source. */
.byte 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF /* 18.0.0 Device Master Kek Source. */
/* Production Device Master Kek Sources. */
.byte 0x88, 0x62, 0x34, 0x6E, 0xFA, 0xF7, 0xD8, 0x3F, 0xE1, 0x30, 0x39, 0x50, 0xF0, 0xB7, 0x5D, 0x5D /* 4.0.0 Device Master Kek Source. */
@@ -175,3 +179,4 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x7C, 0x30, 0xED, 0x8B, 0x39, 0x25, 0x2C, 0x08, 0x8F, 0x48, 0xDC, 0x28, 0xE6, 0x1A, 0x6B, 0x49 /* 15.0.0 Device Master Kek Source. */
.byte 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F /* 16.0.0 Device Master Kek Source. */
.byte 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E /* 17.0.0 Device Master Kek Source. */
.byte 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B /* 18.0.0 Device Master Kek Source. */

@@ -94,7 +94,7 @@ namespace ams::secmon::boot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 17);
static_assert(pkg1::KeyGeneration_Count == 18);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}

@@ -23,17 +23,17 @@ namespace ams::nxboot {
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x8D, 0xEE, 0x9E, 0x11, 0x36, 0x3A, 0x9B, 0x0A, 0x6A, 0xC7, 0xBB, 0xE9, 0xD1, 0x03, 0xF7, 0x80
0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2
};
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSourceDev[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x43, 0xDB, 0x9D, 0x88, 0xDB, 0x38, 0xE9, 0xBF, 0x3D, 0xD7, 0x83, 0x39, 0xEF, 0xB1, 0x4F, 0xA7
0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED
};
alignas(se::AesBlockSize) constexpr inline const u8 EristaMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x71, 0xB9, 0xA6, 0xC0, 0xFF, 0x97, 0x6B, 0x0C, 0xB4, 0x40, 0xB9, 0xD5, 0x81, 0x5D, 0x81, 0x90
0x00, 0x04, 0x5D, 0xF0, 0x4D, 0xCD, 0x14, 0xA3, 0x1C, 0xBF, 0xDE, 0x48, 0x55, 0xBA, 0x35, 0xC1
};
alignas(se::AesBlockSize) constexpr inline const u8 KeyblobKeySource[se::AesBlockSize] = {
@@ -71,6 +71,7 @@ namespace ams::nxboot {
{ 0x5E, 0xC9, 0xC5, 0x0A, 0xD0, 0x5F, 0x8B, 0x7B, 0xA7, 0x39, 0xEA, 0xBC, 0x60, 0x0F, 0x74, 0xE6 }, /* 15.0.0 Device Master Key Source Source. */
{ 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C }, /* 16.0.0 Device Master Key Source Source. */
{ 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 }, /* 17.0.0 Device Master Key Source Source. */
{ 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 }, /* 18.0.0 Device Master Key Source Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSources[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@@ -88,6 +89,7 @@ namespace ams::nxboot {
{ 0x7C, 0x30, 0xED, 0x8B, 0x39, 0x25, 0x2C, 0x08, 0x8F, 0x48, 0xDC, 0x28, 0xE6, 0x1A, 0x6B, 0x49 }, /* 15.0.0 Device Master Kek Source. */
{ 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F }, /* 16.0.0 Device Master Kek Source. */
{ 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E }, /* 17.0.0 Device Master Kek Source. */
{ 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B }, /* 18.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSourcesDev[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@@ -105,6 +107,7 @@ namespace ams::nxboot {
{ 0xAE, 0x05, 0x48, 0x65, 0xAB, 0x17, 0x9D, 0x3D, 0x51, 0xB7, 0x56, 0xBD, 0x9B, 0x0B, 0x5B, 0x6E }, /* 15.0.0 Device Master Kek Source. */
{ 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F }, /* 16.0.0 Device Master Kek Source. */
{ 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 }, /* 17.0.0 Device Master Kek Source. */
{ 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF }, /* 18.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySources[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@@ -125,6 +128,7 @@ namespace ams::nxboot {
{ 0xB1, 0x81, 0xA6, 0x0D, 0x72, 0xC7, 0xEE, 0x15, 0x21, 0xF3, 0xC0, 0xB5, 0x6B, 0x61, 0x6D, 0xE7 }, /* Master key 0D encrypted with Master key 0E. */
{ 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD }, /* Master key 0F encrypted with Master key 10. */
{ 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 }, /* Master key 10 encrypted with Master key 11. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySourcesDev[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@@ -145,6 +149,7 @@ namespace ams::nxboot {
{ 0xD6, 0x80, 0x98, 0xC0, 0xFA, 0xC7, 0x13, 0xCB, 0x93, 0xD2, 0x0B, 0x82, 0x4C, 0xA1, 0x7B, 0x8D }, /* Master key 0D encrypted with Master key 0E. */
{ 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 }, /* Master key 0F encrypted with Master key 10. */
{ 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 }, /* Master key 10 encrypted with Master key 11. */
};
alignas(se::AesBlockSize) constinit u8 MasterKeys[pkg1::OldMasterKeyCount][se::AesBlockSize] = {};

@@ -80,7 +80,7 @@ namespace ams::nxboot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 17);
static_assert(pkg1::KeyGeneration_Count == 18);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}

@@ -259,6 +259,8 @@ namespace ams::nxboot {
return ams::TargetFirmware_16_0_0;
} else if (std::memcmp(package1 + 0x10, "20230906", 8) == 0) {
return ams::TargetFirmware_17_0_0;
} else if (std::memcmp(package1 + 0x10, "20240207", 8) == 0) {
return ams::TargetFirmware_18_0_0;
}
break;
default:

@@ -171,6 +171,9 @@ namespace ams::nxboot {
FsVersion_17_0_0,
FsVersion_17_0_0_Exfat,
FsVersion_18_0_0,
FsVersion_18_0_0_Exfat,
FsVersion_Count,
};
@@ -254,6 +257,9 @@ namespace ams::nxboot {
{ 0x27, 0x07, 0x3B, 0xF0, 0xA1, 0xB8, 0xCE, 0x61 }, /* FsVersion_17_0_0 */
{ 0xEE, 0x0F, 0x4B, 0xAC, 0x6D, 0x1F, 0xFC, 0x4B }, /* FsVersion_17_0_0_Exfat */
{ 0x79, 0x5F, 0x5A, 0x5E, 0xB0, 0xC6, 0x77, 0x9E }, /* FsVersion_18_0_0 */
{ 0x1E, 0x2C, 0x64, 0xB1, 0xCC, 0xE2, 0x78, 0x24 }, /* FsVersion_18_0_0_Exfat */
};
const InitialProcessBinaryHeader *FindInitialProcessBinary(const pkg2::Package2Header *header, const u8 *data, ams::TargetFirmware target_firmware) {
@@ -617,6 +623,14 @@ namespace ams::nxboot {
AddPatch(fs_meta, 0x195FA9, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x170060, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_18_0_0:
AddPatch(fs_meta, 0x18AF49, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x164B50, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_18_0_0_Exfat:
AddPatch(fs_meta, 0x195FD9, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x16FBE0, NogcPatch1, sizeof(NogcPatch1));
break;
default:
break;
}

@@ -6,7 +6,7 @@
[subrepo]
remote = https://github.com/Atmosphere-NX/Atmosphere-libs
branch = master
commit = bfc55834869fe24f8d94550bc6909a65ae7d35c2
parent = 742fd16080bce8cd664d6244304a771f82e8aa04
commit = fadec2981727636ec7ba81d6c83995b7b9782190
parent = 410f23035efeb9e1bd399a020334793bba95bf91
method = merge
cmdver = 0.4.1

@@ -37,6 +37,7 @@ namespace ams::pkg1 {
KeyGeneration_15_0_0 = 0x0E,
KeyGeneration_16_0_0 = 0x0F,
KeyGeneration_17_0_0 = 0x10,
KeyGeneration_18_0_0 = 0x11,
KeyGeneration_Count,

@@ -24,7 +24,7 @@ namespace ams::pkg2 {
constexpr inline int PayloadCount = 3;
constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x18 in Nintendo's code. */
constexpr inline int CurrentBootloaderVersion = 0x14;
constexpr inline int CurrentBootloaderVersion = 0x15;
struct Package2Meta {
using Magic = util::FourCC<'P','K','2','1'>;

@@ -178,7 +178,7 @@ namespace ams::kern::arch::arm64 {
}
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
Result Finalize();
private:
Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);

@@ -30,6 +30,7 @@ namespace ams::kern::arch::arm64 {
KPhysicalAddress phys_addr;
size_t block_size;
u8 sw_reserved_bits;
u8 attr;
constexpr bool IsHeadMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHead) != 0; }
constexpr bool IsHeadAndBodyMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }

@@ -28,8 +28,8 @@ namespace ams::kern::arch::arm64 {
m_page_table.Activate(id);
}
Result Initialize(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, system_resource, resource_limit));
Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit));
}
void Finalize() { m_page_table.Finalize(); }
@@ -316,6 +316,8 @@
size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }
size_t GetAliasRegionExtraSize() const { return m_page_table.GetAliasRegionExtraSize(); }
size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }
size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }

@@ -16,11 +16,10 @@
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
namespace ams::kern::arch::arm64::smc {
template<int SmcId, bool DisableInterrupt>
template<int SmcId>
void SecureMonitorCall(u64 *buf) {
/* Load arguments into registers. */
register u64 x0 asm("x0") = buf[0];
@@ -32,34 +31,18 @@ namespace ams::kern::arch::arm64::smc {
register u64 x6 asm("x6") = buf[6];
register u64 x7 asm("x7") = buf[7];
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
/* Perform the call. */
if constexpr (DisableInterrupt) {
KScopedInterruptDisable di;
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
} else {
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
}
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
/* Store arguments to output. */
buf[0] = x0;
@@ -78,18 +61,18 @@ namespace ams::kern::arch::arm64::smc {
PsciFunction_CpuOn = 0xC4000003,
};
template<int SmcId, bool DisableInterrupt>
template<int SmcId>
u64 PsciCall(PsciFunction function, u64 x1 = 0, u64 x2 = 0, u64 x3 = 0, u64 x4 = 0, u64 x5 = 0, u64 x6 = 0, u64 x7 = 0) {
ams::svc::lp64::SecureMonitorArguments args = { { function, x1, x2, x3, x4, x5, x6, x7 } };
SecureMonitorCall<SmcId, DisableInterrupt>(args.r);
SecureMonitorCall<SmcId>(args.r);
return args.r[0];
}
template<int SmcId, bool DisableInterrupt>
template<int SmcId>
u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
return PsciCall<SmcId, DisableInterrupt>(PsciFunction_CpuOn, core_id, entrypoint, arg);
return PsciCall<SmcId>(PsciFunction_CpuOn, core_id, entrypoint, arg);
}
}

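The rewrite above (commit ecbe5cd406) drops the `DisableInterrupt` template parameter along with the duplicated `if constexpr` bodies; a caller that needs interrupts masked now says so at the call site. The resulting shape, reduced to a sketch (`Example` is hypothetical; the real pattern appears in `CallSecureMonitorFromUser` later in this diff):

/* Before: interrupt policy baked into the callee, body duplicated per branch. */
template<int SmcId, bool DisableInterrupt> void SecureMonitorCall(u64 *buf);

/* After: one body; the caller owns the policy via an RAII scope guard. */
template<int SmcId> void SecureMonitorCall(u64 *buf);

void Example(u64 *buf) {
    KScopedInterruptDisable di;         /* interrupts masked for this scope only */
    SecureMonitorCall<SmcId_User>(buf); /* single implementation to maintain     */
}
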
@@ -32,6 +32,7 @@ namespace ams::kern {
struct InitialProcessBinaryLayout {
uintptr_t address;
uintptr_t _08;
uintptr_t kern_address;
};
struct InitialProcessBinaryLayoutWithSize {

@@ -177,7 +177,7 @@ namespace ams::kern {
};
constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
return static_cast<KMemoryPermission>((util::ToUnderlying(perm) & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((util::ToUnderlying(perm) & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
return static_cast<KMemoryPermission>((util::ToUnderlying(perm) & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((util::ToUnderlying(perm) & ams::svc::MemoryPermission_Write) ? KMemoryPermission_KernelWrite : KMemoryPermission_None) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
}
enum KMemoryAttribute : u8 {

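The one-line change above is commit 900913fe3b: the old expression synthesized the kernel-write bit by shifting the user-write bit left by `KMemoryPermission_KernelShift`, while the new one tests the svc write permission directly and ORs in `KMemoryPermission_KernelWrite`, so the result no longer depends on the user and kernel bit layouts lining up. Skeleton of the fix, with bit values assumed purely for illustration:

enum : u32 { UserWrite = 1u << 1, KernelRead = 1u << 8, KernelWrite = 1u << 9 };

constexpr u32 Convert(u32 perm) {
    /* old: perm | KernelRead | ((perm & UserWrite) << KernelShift)    -- shift trick   */
    return perm | KernelRead | ((perm & UserWrite) ? KernelWrite : 0u); /* explicit test */
}
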
@@ -185,7 +185,7 @@ namespace ams::kern {
}
}
Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random);
Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
public:
KMemoryManager()
: m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
@@ -199,7 +199,7 @@
NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option);
NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
Pool GetPool(KPhysicalAddress address) const {

@@ -145,6 +145,8 @@ namespace ams::kern {
bool IsEquivalentTo(const KPageGroup &rhs) const;
Result CopyRangeTo(KPageGroup &out, size_t offset, size_t size) const;
ALWAYS_INLINE bool operator==(const KPageGroup &rhs) const {
return this->IsEquivalentTo(rhs);
}

@@ -62,18 +62,21 @@ namespace ams::kern {
KPhysicalAddress m_address;
size_t m_size;
bool m_heap;
u8 m_attr;
public:
constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false) { /* ... */ }
constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false), m_attr(0) { /* ... */ }
void Set(KPhysicalAddress address, size_t size, bool heap) {
void Set(KPhysicalAddress address, size_t size, bool heap, u8 attr) {
m_address = address;
m_size = size;
m_heap = heap;
m_attr = attr;
}
constexpr KPhysicalAddress GetAddress() const { return m_address; }
constexpr size_t GetSize() const { return m_size; }
constexpr bool IsHeap() const { return m_heap; }
constexpr u8 GetAttribute() const { return m_attr; }
void Open();
void Close();
@@ -86,6 +89,15 @@
MemoryFillValue_Heap = 'Z',
};
enum RegionType {
RegionType_KernelMap = 0,
RegionType_Stack = 1,
RegionType_Alias = 2,
RegionType_Heap = 3,
RegionType_Count,
};
enum OperationType {
OperationType_Map = 0,
OperationType_MapGroup = 1,
@@ -165,15 +177,9 @@
private:
KProcessAddress m_address_space_start;
KProcessAddress m_address_space_end;
KProcessAddress m_heap_region_start;
KProcessAddress m_heap_region_end;
KProcessAddress m_region_starts[RegionType_Count];
KProcessAddress m_region_ends[RegionType_Count];
KProcessAddress m_current_heap_end;
KProcessAddress m_alias_region_start;
KProcessAddress m_alias_region_end;
KProcessAddress m_stack_region_start;
KProcessAddress m_stack_region_end;
KProcessAddress m_kernel_map_region_start;
KProcessAddress m_kernel_map_region_end;
KProcessAddress m_alias_code_region_start;
KProcessAddress m_alias_code_region_end;
KProcessAddress m_code_region_start;
@@ -183,6 +189,7 @@
size_t m_mapped_unsafe_physical_memory;
size_t m_mapped_insecure_memory;
size_t m_mapped_ipc_server_memory;
size_t m_alias_region_extra_size;
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
KLightLock m_device_map_lock;
@@ -203,12 +210,12 @@
MemoryFillValue m_stack_fill_value;
public:
constexpr explicit KPageTableBase(util::ConstantInitializeTag)
: m_address_space_start(Null<KProcessAddress>), m_address_space_end(Null<KProcessAddress>), m_heap_region_start(Null<KProcessAddress>),
m_heap_region_end(Null<KProcessAddress>), m_current_heap_end(Null<KProcessAddress>), m_alias_region_start(Null<KProcessAddress>),
m_alias_region_end(Null<KProcessAddress>), m_stack_region_start(Null<KProcessAddress>), m_stack_region_end(Null<KProcessAddress>),
m_kernel_map_region_start(Null<KProcessAddress>), m_kernel_map_region_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
: m_address_space_start(Null<KProcessAddress>), m_address_space_end(Null<KProcessAddress>),
m_region_starts{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
m_region_ends{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
m_current_heap_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
m_alias_code_region_end(Null<KProcessAddress>), m_code_region_start(Null<KProcessAddress>), m_code_region_end(Null<KProcessAddress>),
m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(),
m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(), m_alias_region_extra_size(),
m_general_lock(), m_map_physical_memory_lock(), m_device_map_lock(), m_impl(util::ConstantInitialize), m_memory_block_manager(util::ConstantInitialize),
m_allocate_option(), m_address_space_width(), m_is_kernel(), m_enable_aslr(), m_enable_device_address_space_merge(),
m_memory_block_slab_manager(), m_block_info_manager(), m_resource_limit(), m_cached_physical_linear_region(), m_cached_physical_heap_region(),
@ -220,7 +227,7 @@ namespace ams::kern {
explicit KPageTableBase() { /* ... */ }
NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
void Finalize();
@@ -236,7 +243,7 @@
}
constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
return this->Contains(addr, size) && m_alias_region_start <= addr && addr + size - 1 <= m_alias_region_end - 1;
return this->Contains(addr, size) && m_region_starts[RegionType_Alias] <= addr && addr + size - 1 <= m_region_ends[RegionType_Alias] - 1;
}
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
@@ -328,7 +335,7 @@
Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const;
Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, KMemoryPermission perm);
Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties &properties);
Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);
void RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg);
@@ -479,24 +486,30 @@
}
public:
KProcessAddress GetAddressSpaceStart() const { return m_address_space_start; }
KProcessAddress GetHeapRegionStart() const { return m_heap_region_start; }
KProcessAddress GetAliasRegionStart() const { return m_alias_region_start; }
KProcessAddress GetStackRegionStart() const { return m_stack_region_start; }
KProcessAddress GetKernelMapRegionStart() const { return m_kernel_map_region_start; }
KProcessAddress GetHeapRegionStart() const { return m_region_starts[RegionType_Heap]; }
KProcessAddress GetAliasRegionStart() const { return m_region_starts[RegionType_Alias]; }
KProcessAddress GetStackRegionStart() const { return m_region_starts[RegionType_Stack]; }
KProcessAddress GetKernelMapRegionStart() const { return m_region_starts[RegionType_KernelMap]; }
KProcessAddress GetAliasCodeRegionStart() const { return m_alias_code_region_start; }
size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; }
size_t GetHeapRegionSize() const { return m_heap_region_end - m_heap_region_start; }
size_t GetAliasRegionSize() const { return m_alias_region_end - m_alias_region_start; }
size_t GetStackRegionSize() const { return m_stack_region_end - m_stack_region_start; }
size_t GetKernelMapRegionSize() const { return m_kernel_map_region_end - m_kernel_map_region_start; }
size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; }
size_t GetHeapRegionSize() const { return m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap]; }
size_t GetAliasRegionSize() const { return m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias]; }
size_t GetStackRegionSize() const { return m_region_ends[RegionType_Stack] - m_region_starts[RegionType_Stack]; }
size_t GetKernelMapRegionSize() const { return m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap]; }
size_t GetAliasCodeRegionSize() const { return m_alias_code_region_end - m_alias_code_region_start; }
size_t GetAliasRegionExtraSize() const { return m_alias_region_extra_size; }
size_t GetNormalMemorySize() const {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
return (m_current_heap_end - m_region_starts[RegionType_Heap]) + m_mapped_physical_memory_size;
}
size_t GetCodeSize() const;

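The KPageTableBase changes above (commit 25bae14064) fold four start/end member pairs into `m_region_starts`/`m_region_ends` arrays indexed by the new `RegionType` enum, so layout code and accessors index a table instead of special-casing each region. The pattern, reduced to a sketch with `uintptr_t` standing in for `KProcessAddress`:

enum RegionType { RegionType_KernelMap, RegionType_Stack, RegionType_Alias, RegionType_Heap, RegionType_Count };

struct Regions {
    uintptr_t starts[RegionType_Count];
    uintptr_t ends[RegionType_Count];

    /* One accessor replaces four near-identical ones. */
    size_t GetSize(RegionType t) const { return ends[t] - starts[t]; }

    /* Mirrors the rewritten IsInAliasRegion above. */
    bool Contains(RegionType t, uintptr_t addr, size_t size) const {
        return starts[t] <= addr && addr + size - 1 <= ends[t] - 1;
    }
};
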
@@ -53,7 +53,7 @@ namespace ams::kern {
static size_t GetRealMemorySize();
static size_t GetIntendedMemorySize();
static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out, KPhysicalAddress kern_base_address);
static bool ShouldIncreaseThreadResourceLimit();
static void TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args);
static size_t GetApplicationPoolSize();

@@ -223,6 +223,13 @@ namespace ams::kern::arch::arm64 {
type = ams::svc::ExceptionType_InstructionAbort;
break;
case EsrEc_DataAbortEl0:
/* If esr.IFSC is "Alignment Fault", return UnalignedData instead of DataAbort. */
if ((esr & 0x3F) == 0b100001) {
type = ams::svc::ExceptionType_UnalignedData;
} else {
type = ams::svc::ExceptionType_DataAbort;
}
break;
default:
type = ams::svc::ExceptionType_DataAbort;
break;

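The new case above (commit 4fe139ea52) keys off the fault status code in the low six bits of the ESR_EL1 syndrome: for data aborts that field is the DFSC, and 0b100001 (0x21) is architecturally defined as an alignment fault. As a standalone predicate:

constexpr bool IsAlignmentFault(u64 esr) {
    /* ESR_EL1 bits [5:0] hold the fault status code; 0x21 = alignment fault. */
    return (esr & 0x3F) == 0b100001;
}
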
@@ -207,7 +207,7 @@ namespace ams::kern::arch::arm64 {
R_SUCCEED();
}
Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
/* Get an ASID */
m_asid = g_asid_manager.Reserve();
ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
@@ -222,10 +222,10 @@
ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };
/* Initialize our base table. */
const size_t as_width = GetAddressSpaceWidth(as_type);
const size_t as_width = GetAddressSpaceWidth(flags);
const KProcessAddress as_start = 0;
const KProcessAddress as_end = (1ul << as_width);
R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));
R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));
/* Note that we've updated the table (since we created it). */
this->NoteUpdated();
@@ -258,7 +258,7 @@
/* Begin the traversal. */
TraversalContext context;
TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
bool cur_valid = false;
TraversalEntry next_entry;
bool next_valid;
@@ -268,7 +268,9 @@
/* Iterate over entries. */
while (true) {
if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
/* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
/* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size && next_entry.attr == (cur_entry.attr ? 1 : 0))) {
cur_entry.block_size += next_entry.block_size;
} else {
if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {

@@ -46,12 +46,14 @@ namespace ams::kern::arch::arm64 {
out_entry->block_size = L3BlockSize;
}
out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
out_entry->attr = 0;
return true;
} else {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L3BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
return false;
}
}
@@ -69,6 +71,7 @@
out_entry->block_size = L2BlockSize;
}
out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
out_entry->attr = 0;
/* Set the output context. */
out_context->l3_entry = nullptr;
@@ -79,6 +82,8 @@
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L2BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l3_entry = nullptr;
return false;
}
@@ -108,6 +113,8 @@
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l2_entry = nullptr;
out_context->l3_entry = nullptr;
return false;
@@ -119,6 +126,7 @@
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l1_entry = m_table + m_num_entries;
out_context->l2_entry = nullptr;
out_context->l3_entry = nullptr;
@@ -220,6 +228,7 @@ namespace ams::kern::arch::arm64 {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
context->l1_entry = m_table + m_num_entries;
context->l2_entry = nullptr;
context->l3_entry = nullptr;

@@ -68,7 +68,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
/* Check if our disable count allows us to call SVCs. */
mrs x10, tpidrro_el0
ldrh w10, [x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)]
add x10, x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)
ldtrh w10, [x10]
cbz w10, 1f
/* It might not, so check the stack params to see if we must not allow the SVC. */
@@ -352,7 +353,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
/* Check if our disable count allows us to call SVCs. */
mrs x10, tpidrro_el0
ldrh w10, [x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)]
add x10, x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)
ldtrh w10, [x10]
cbz w10, 1f
/* It might not, so check the stack params to see if we must not allow the SVC. */

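The `ldrh` → `add`+`ldtrh` swap above is commit cf5895e04f: the `ldtr*` family performs its access with EL0 (unprivileged) permissions even when executed at EL1, so this read of the thread-local disable count is permission-checked as if userspace had issued it. The unprivileged form only takes a small signed immediate, hence the separate `add` to fold in the field offset first. An equivalent sketch using GCC inline assembly (hypothetical helper):

#include <cstdint>

static inline std::uint16_t ReadU16AsUser(std::uintptr_t address) {
    std::uint16_t value;
    /* ldtrh: halfword load checked against EL0 permissions. */
    __asm__ __volatile__("ldtrh %w[v], [%[a]]" : [v]"=r"(value) : [a]"r"(address) : "memory");
    return value;
}
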
@@ -296,7 +296,7 @@ namespace ams::kern::board::nintendo::nx {
/* TODO: Move this into a header for the MC in general. */
constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
u32 config_value;
MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
smc::init::ReadWriteRegister(std::addressof(config_value), MemoryControllerConfigurationRegister, 0, 0);
return static_cast<size_t>(config_value & 0x3FFF) << 20;
}
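For the computation above: the low bits of the memory controller's configuration register hold the DRAM size in MiB, and the `<< 20` converts MiB to bytes. A worked instance of the decode:

/* e.g. a field value of 0x1000 (4096 MiB) decodes to 4 GiB. */
static_assert((static_cast<size_t>(0x1000 & 0x3FFF) << 20) == 4096ull * 1024 * 1024);

This is the quantity that the half-of-true-size fix in 1.7.0 (commit 29cc13543a) has to reconcile with the intended memory configuration, e.g. a forced 4GB arrangement on an 8GB board.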
@@ -387,7 +387,7 @@ namespace ams::kern::board::nintendo::nx {
}
void KSystemControl::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, false>(core_id, entrypoint, arg)) == 0);
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor>(core_id, entrypoint, arg)) == 0);
}
/* Randomness for Initialization. */
@@ -601,8 +601,9 @@ namespace ams::kern::board::nintendo::nx {
if (g_call_smc_on_panic) {
/* If we should, instruct the secure monitor to display a panic screen. */
smc::Panic(0xF00);
smc::ShowError(0xF00);
}
AMS_INFINITE_LOOP();
}

@@ -43,7 +43,7 @@ namespace ams::kern::board::nintendo::nx::smc {
enum FunctionId : u32 {
FunctionId_GetConfig = 0xC3000004,
FunctionId_GenerateRandomBytes = 0xC3000005,
FunctionId_Panic = 0xC3000006,
FunctionId_ShowError = 0xC3000006,
FunctionId_ConfigureCarveout = 0xC3000007,
FunctionId_ReadWriteRegister = 0xC3000008,
@@ -51,122 +51,187 @@ namespace ams::kern::board::nintendo::nx::smc {
FunctionId_SetConfig = 0xC3000409,
};
constexpr size_t GenerateRandomBytesSizeMax = sizeof(::ams::svc::lp64::SecureMonitorArguments) - sizeof(::ams::svc::lp64::SecureMonitorArguments{}.r[0]);
/* Global lock for generate random bytes. */
constinit KSpinLock g_generate_random_lock;
bool TryGetConfigImpl(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* If successful, copy the output. */
const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
if (AMS_LIKELY(success)) {
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.r[1 + i];
}
}
return success;
}
bool SetConfigImpl(ConfigItem config_item, u64 value) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ReadWriteRegisterImpl(u32 *out, u64 address, u32 mask, u32 value) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Unconditionally write the output. */
*out = static_cast<u32>(args.r[1]);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool GenerateRandomBytesImpl(void *dst, size_t size) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* If successful, copy the output. */
const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
if (AMS_LIKELY(success)) {
std::memcpy(dst, std::addressof(args.r[1]), size);
}
return success;
}
bool ConfigureCarveoutImpl(size_t which, uintptr_t address, size_t size) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ShowErrorImpl(u32 color) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ShowError, color } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User>(args->r);
}
}
/* SMC functionality needed for init. */
namespace init {
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.r[1 + i];
}
/* Ensure we successfully get the config. */
MESOSPHERE_INIT_ABORT_UNLESS(TryGetConfigImpl(out, num_qwords, config_item));
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Call SmcGenerateRandomBytes() */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
/* Check that the size is valid. */
MESOSPHERE_INIT_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Copy output. */
std::memcpy(dst, std::addressof(args.r[1]), size);
/* Ensure we successfully generate the random bytes. */
MESOSPHERE_INIT_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
}
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
*out = args.r[1];
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
void ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
/* Ensure we successfully access the register. */
MESOSPHERE_INIT_ABORT_UNLESS(ReadWriteRegisterImpl(out, address, mask, value));
}
}
bool TryGetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
/* Disable interrupts. */
KScopedInterruptDisable di;
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
if (AMS_UNLIKELY(static_cast<SmcResult>(args.r[0]) != SmcResult::Success)) {
return false;
}
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.r[1 + i];
}
return true;
/* Get the config. */
return TryGetConfigImpl(out, num_qwords, config_item);
}
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Ensure we successfully get the config. */
MESOSPHERE_ABORT_UNLESS(TryGetConfig(out, num_qwords, config_item));
}
 bool SetConfig(ConfigItem config_item, u64 value) {
-    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
     /* Disable interrupts. */
     KScopedInterruptDisable di;
-    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
-    return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
+    /* Set the config. */
+    return SetConfigImpl(config_item, value);
 }
 bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
-    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
     /* Disable interrupts. */
     KScopedInterruptDisable di;
-    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
-    *out = static_cast<u32>(args.r[1]);
-    return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
+    /* Access the register. */
+    return ReadWriteRegisterImpl(out, address, mask, value);
 }
 void ConfigureCarveout(size_t which, uintptr_t address, size_t size) {
-    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
     /* Disable interrupts. */
     KScopedInterruptDisable di;
-    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
-    MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+    /* Ensure that we successfully configure the carveout. */
+    MESOSPHERE_ABORT_UNLESS(ConfigureCarveoutImpl(which, address, size));
 }
 void GenerateRandomBytes(void *dst, size_t size) {
-    /* Setup for call. */
-    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
-    MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
-    /* Make call. */
-    {
-        KScopedInterruptDisable intr_disable;
-        KScopedSpinLock lk(g_generate_random_lock);
-        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
-    }
-    MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
-    /* Copy output. */
-    std::memcpy(dst, std::addressof(args.r[1]), size);
+    /* Check that the size is valid. */
+    MESOSPHERE_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
+    /* Disable interrupts. */
+    KScopedInterruptDisable di;
+    /* Acquire the exclusive right to generate random bytes. */
+    KScopedSpinLock lk(g_generate_random_lock);
+    /* Ensure we successfully generate the random bytes. */
+    MESOSPHERE_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
 }
-void NORETURN Panic(u32 color) {
-    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_Panic, color } };
-    /* Disable interrupts. */
-    KScopedInterruptDisable di;
-    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
-    AMS_INFINITE_LOOP();
-}
+void ShowError(u32 color) {
+    /* Disable interrupts. */
+    KScopedInterruptDisable di;
+    /* Ensure we successfully show the error. */
+    MESOSPHERE_ABORT_UNLESS(ShowErrorImpl(color));
+}
 void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
-    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User, true>(args->r);
+    /* Disable interrupts. */
+    KScopedInterruptDisable di;
+    /* Perform the call. */
+    CallSecureMonitorFromUserImpl(args);
}
}
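
The pattern across this file: each public entry point used to build SecureMonitorArguments and issue the SMC inline; now a private *Impl helper performs the raw call, while the public wrapper owns only the concurrency policy (KScopedInterruptDisable, plus a spin lock for random bytes) before delegating. The same Impl helpers back the init-time variants, which abort on failure instead of returning it. A minimal sketch of that shape — names and the function-id value are illustrative, not Atmosphere's:

    #include <cstdint>

    enum class SmcResult : uint64_t { Success = 0 };

    /* Stub for the platform SMC primitive; the real one traps to the secure monitor. */
    SmcResult RawSecureMonitorCall(uint64_t (&regs)[8]) { (void)regs; return SmcResult::Success; }

    /* Impl: marshals arguments and performs the raw call; shared by the runtime
     * wrapper and an init-time variant that aborts on failure. */
    bool SetConfigImpl(uint64_t item, uint64_t value) {
        uint64_t regs[8] = { 0 /* placeholder function id */, item, 0, value };
        return RawSecureMonitorCall(regs) == SmcResult::Success;
    }

    /* Wrapper: owns only the locking policy around the call. */
    bool SetConfig(uint64_t item, uint64_t value) {
        /* a scoped interrupt-disable would go here (KScopedInterruptDisable in the kernel) */
        return SetConfigImpl(item, value);
    }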

View file

@ -111,7 +111,7 @@ namespace ams::kern::board::nintendo::nx::smc {
bool SetConfig(ConfigItem config_item, u64 value);
void NORETURN Panic(u32 color);
void ShowError(u32 color);
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
@ -119,7 +119,7 @@ namespace ams::kern::board::nintendo::nx::smc {
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
void GenerateRandomBytes(void *dst, size_t size);
-    bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
+    void ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
}

View file

@ -136,7 +136,7 @@ namespace ams::kern {
{
/* Allocate the previously unreserved pages. */
KPageGroup unreserve_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
-    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
/* Add the previously reserved pages. */
if (src_pool == dst_pool && binary_pages != 0) {
@ -173,7 +173,7 @@ namespace ams::kern {
/* If the pool is the same, we need to use the workaround page group. */
if (src_pool == dst_pool) {
/* Allocate a new, usable group for the process. */
-    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
/* Copy data from the working page group to the usable one. */
auto work_it = pg.begin();

View file

@ -79,29 +79,7 @@ namespace ams::kern {
/* Create a page group representing the segment. */
KPageGroup segment_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
-    if (size_t remaining_size = util::AlignUp(seg_size, PageSize); remaining_size != 0) {
-        /* Find the pages whose data corresponds to the segment. */
-        size_t cur_offset = 0;
-        for (auto it = pg.begin(); it != pg.end() && remaining_size > 0; ++it) {
-            /* Get the current size. */
-            const size_t cur_size = it->GetSize();
-            /* Determine if the offset is in range. */
-            const size_t rel_diff = seg_offset - cur_offset;
-            const bool is_before = cur_offset <= seg_offset;
-            cur_offset += cur_size;
-            if (is_before && seg_offset < cur_offset) {
-                /* It is, so add the block. */
-                const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
-                MESOSPHERE_R_ABORT_UNLESS(segment_pg.AddBlock(it->GetAddress() + rel_diff, block_size / PageSize));
-                /* Advance. */
-                cur_offset = seg_offset + block_size;
-                remaining_size -= block_size;
-                seg_offset += block_size;
-            }
-        }
-    }
+    MESOSPHERE_R_ABORT_UNLESS(pg.CopyRangeTo(segment_pg, seg_offset, util::AlignUp(seg_size, PageSize)));
/* Setup the new page group's memory so that we can load the segment. */
{
@ -226,6 +204,9 @@ namespace ams::kern {
const uintptr_t map_end = map_start + map_size;
MESOSPHERE_ABORT_UNLESS(start_address == 0);
/* Default fields in parameter to zero. */
*out = {};
/* Set fields in parameter. */
out->code_address = map_start + start_address;
out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;

View file

@ -225,7 +225,7 @@ namespace ams::kern {
return allocated_block;
}
-Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random) {
+Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
/* Choose a heap based on our page size request. */
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
@ -241,7 +241,7 @@ namespace ams::kern {
};
/* Keep allocating until we've allocated all our pages. */
-    for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
+    for (s32 index = heap_index; index >= min_heap_index && num_pages > 0; index--) {
const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
while (num_pages >= pages_per_alloc) {
@ -274,7 +274,7 @@ namespace ams::kern {
R_SUCCEED();
}
-Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
+Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option) {
MESOSPHERE_ASSERT(out != nullptr);
MESOSPHERE_ASSERT(out->GetNumPages() == 0);
@ -285,8 +285,11 @@ namespace ams::kern {
const auto [pool, dir] = DecodeOption(option);
KScopedLightLock lk(m_pool_locks[pool]);
/* Choose a heap based on our alignment size request. */
const s32 heap_index = KPageHeap::GetAlignedBlockIndex(align_pages, align_pages);
/* Allocate the page group. */
-    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true));
+    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true, heap_index));
/* Open the first reference to the pages. */
for (const auto &block : *out) {
@ -326,8 +329,11 @@ namespace ams::kern {
const bool has_optimized = m_has_optimized_process[pool];
const bool is_optimized = m_optimized_process_ids[pool] == process_id;
/* Always use the minimum alignment size. */
const s32 heap_index = 0;
/* Allocate the page group. */
-    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false));
+    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false, heap_index));
/* Set whether we should optimize. */
optimized = has_optimized && is_optimized;
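
AllocateAndOpen now takes an align_pages argument and converts it to a minimum heap index, so AllocatePageGroupImpl never descends to blocks smaller than the requested alignment; callers that don't care pass 1 and keep the old behavior. A simplified sketch of how such an index floor could be derived — the block table is illustrative, not KPageHeap's actual one:

    #include <cstddef>

    /* Illustrative block sizes in pages, smallest first. */
    constexpr size_t BlockNumPages[] = { 1, 4, 8, 16, 512 };
    constexpr int NumBlocks = sizeof(BlockNumPages) / sizeof(BlockNumPages[0]);

    constexpr int GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
        /* Smallest block that covers both the request and the alignment. */
        const size_t needed = num_pages > align_pages ? num_pages : align_pages;
        for (int i = 0; i < NumBlocks; ++i) {
            if (BlockNumPages[i] >= needed) { return i; }
        }
        return -1;
    }

    static_assert(GetAlignedBlockIndex(1, 1) == 0, "align_pages = 1 preserves the old behavior");
    static_assert(GetAlignedBlockIndex(3, 4) == 1, "a 4-page alignment floors allocation at the 4-page block");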

View file

@ -84,6 +84,58 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageGroup::CopyRangeTo(KPageGroup &out, size_t range_offset, size_t range_size) const {
/* Get the previous last block for the group. */
KBlockInfo * const out_last = out.m_last_block;
const auto out_last_addr = out_last != nullptr ? out_last->GetAddress() : Null<KPhysicalAddress>;
const auto out_last_np = out_last != nullptr ? out_last->GetNumPages() : 0;
/* Ensure we cleanup the group on failure. */
ON_RESULT_FAILURE {
KBlockInfo *cur = out_last != nullptr ? out_last->GetNext() : out.m_first_block;
while (cur != nullptr) {
KBlockInfo *next = cur->GetNext();
out.m_manager->Free(cur);
cur = next;
}
if (out_last != nullptr) {
out_last->Initialize(out_last_addr, out_last_np);
out_last->SetNext(nullptr);
} else {
out.m_first_block = nullptr;
}
out.m_last_block = out_last;
};
/* Find the pages within the requested range. */
size_t cur_offset = 0, remaining_size = range_size;
for (auto it = this->begin(); it != this->end() && remaining_size > 0; ++it) {
/* Get the current size. */
const size_t cur_size = it->GetSize();
/* Determine if the offset is in range. */
const size_t rel_diff = range_offset - cur_offset;
const bool is_before = cur_offset <= range_offset;
cur_offset += cur_size;
if (is_before && range_offset < cur_offset) {
/* It is, so add the block. */
const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
R_TRY(out.AddBlock(it->GetAddress() + rel_diff, block_size / PageSize));
/* Advance. */
cur_offset = range_offset + block_size;
remaining_size -= block_size;
range_offset += block_size;
}
}
/* Check that we successfully copied the range. */
MESOSPHERE_ABORT_UNLESS(remaining_size == 0);
R_SUCCEED();
}
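
CopyRangeTo extracts the blocks backing [range_offset, range_offset + range_size) into another group, which is what the process-loader hunk above now uses in place of its hand-rolled search. Usage mirroring that call site:

    /* Carve the pages backing a segment out of an existing group. */
    KPageGroup segment_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
    MESOSPHERE_R_ABORT_UNLESS(pg.CopyRangeTo(segment_pg, seg_offset, util::AlignUp(seg_size, PageSize)));
    /* On failure, the ON_RESULT_FAILURE block restores the destination to its
     * prior last block, so callers never observe a partially extended group. */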
void KPageGroup::Open() const {
auto &mm = Kernel::GetMemoryManager();

View file

@ -97,15 +97,12 @@ namespace ams::kern {
m_enable_aslr = true;
m_enable_device_address_space_merge = false;
-    m_heap_region_start = 0;
-    m_heap_region_end = 0;
+    for (auto i = 0; i < RegionType_Count; ++i) {
+        m_region_starts[i] = 0;
+        m_region_ends[i] = 0;
+    }
     m_current_heap_end = 0;
-    m_alias_region_start = 0;
-    m_alias_region_end = 0;
-    m_stack_region_start = 0;
-    m_stack_region_end = 0;
-    m_kernel_map_region_start = 0;
-    m_kernel_map_region_end = 0;
m_alias_code_region_start = 0;
m_alias_code_region_end = 0;
m_code_region_start = 0;
@ -115,6 +112,7 @@ namespace ams::kern {
m_mapped_unsafe_physical_memory = 0;
m_mapped_insecure_memory = 0;
m_mapped_ipc_server_memory = 0;
m_alias_region_extra_size = 0;
m_memory_block_slab_manager = Kernel::GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
m_block_info_manager = Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer();
@ -135,7 +133,7 @@ namespace ams::kern {
R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
}
-Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
+Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
/* Validate the region. */
MESOSPHERE_ABORT_UNLESS(start <= code_address);
MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);
@ -149,13 +147,16 @@ namespace ams::kern {
return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
};
/* Default to zero alias region extra size. */
m_alias_region_extra_size = 0;
/* Set our width and heap/alias sizes. */
-    m_address_space_width = GetAddressSpaceWidth(as_type);
+    m_address_space_width = GetAddressSpaceWidth(flags);
size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap);
/* Adjust heap/alias size if we don't have an alias region. */
-    if ((as_type & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) {
+    if ((flags & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) {
heap_region_size += alias_region_size;
alias_region_size = 0;
}
@ -165,35 +166,57 @@ namespace ams::kern {
KProcessAddress process_code_end;
size_t stack_region_size;
size_t kernel_map_region_size;
+    KProcessAddress before_process_code_start, after_process_code_start;
+    size_t before_process_code_size, after_process_code_size;
     if (m_address_space_width == 39) {
-        alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
-        heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap);
-        stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Stack);
-        kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
-        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit);
-        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit);
-        m_alias_code_region_start = m_code_region_start;
-        m_alias_code_region_end = m_code_region_end;
-        process_code_start = util::AlignDown(GetInteger(code_address), RegionAlignment);
-        process_code_end = util::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
+        stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Stack);
+        kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit);
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = m_code_region_end;
+        process_code_start = util::AlignDown(GetInteger(code_address), RegionAlignment);
+        process_code_end = util::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
+        before_process_code_start = m_code_region_start;
+        before_process_code_size = process_code_start - before_process_code_start;
+        after_process_code_start = process_code_end;
+        after_process_code_size = m_code_region_end - process_code_end;
+        /* If we have a 39-bit address space and should, enable extra size to the alias region. */
+        if (flags & ams::svc::CreateProcessFlag_EnableAliasRegionExtraSize) {
+            /* Extra size is 1/8th of the address space. */
+            m_alias_region_extra_size = (static_cast<size_t>(1) << m_address_space_width) / 8;
+            alias_region_size += m_alias_region_extra_size;
+        }
     } else {
-        stack_region_size = 0;
-        kernel_map_region_size = 0;
-        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall);
-        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
-        m_stack_region_start = m_code_region_start;
-        m_alias_code_region_start = m_code_region_start;
-        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
-        m_stack_region_end = m_code_region_end;
-        m_kernel_map_region_start = m_code_region_start;
-        m_kernel_map_region_end = m_code_region_end;
-        process_code_start = m_code_region_start;
-        process_code_end = m_code_region_end;
+        stack_region_size = 0;
+        kernel_map_region_size = 0;
+        m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall);
+        m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
+        m_alias_code_region_start = m_code_region_start;
+        m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
+        m_region_starts[RegionType_Stack] = m_code_region_start;
+        m_region_ends[RegionType_Stack] = m_code_region_end;
+        m_region_starts[RegionType_KernelMap] = m_code_region_start;
+        m_region_ends[RegionType_KernelMap] = m_code_region_end;
+        process_code_start = m_code_region_start;
+        process_code_end = m_code_region_end;
+        before_process_code_start = m_code_region_start;
+        before_process_code_size = 0;
+        after_process_code_start = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge);
+        after_process_code_size = GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
     }
     /* Set other basic fields. */
-    m_enable_aslr = enable_aslr;
-    m_enable_device_address_space_merge = enable_das_merge;
+    m_enable_aslr = (flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
+    m_enable_device_address_space_merge = (flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
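
For the only configuration that can request it (a 39-bit address space), the extra alias size works out to 2^39 / 8 = 64 GiB of additional alias window:

    /* 1/8th of a 39-bit address space is 64 GiB. */
    static_assert(((1ull << 39) / 8) == (64ull << 30));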
m_address_space_start = start;
m_address_space_end = end;
m_is_kernel = false;
@ -201,100 +224,285 @@ namespace ams::kern {
m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
m_resource_limit = resource_limit;
-    /* Determine the region we can place our undetermineds in. */
-    KProcessAddress alloc_start;
-    size_t alloc_size;
-    if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >= (GetInteger(end) - GetInteger(process_code_end))) {
-        alloc_start = m_code_region_start;
-        alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start);
-    } else {
-        alloc_start = process_code_end;
-        alloc_size = GetInteger(end) - GetInteger(process_code_end);
-    }
-    const size_t needed_size = (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
-    R_UNLESS(alloc_size >= needed_size, svc::ResultOutOfMemory());
-    const size_t remaining_size = alloc_size - needed_size;
-    /* Determine random placements for each region. */
-    size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
-    if (enable_aslr) {
-        alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
-        heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
-        stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
-        kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
-    }
-    /* Setup heap and alias regions. */
-    m_alias_region_start = alloc_start + alias_rnd;
-    m_alias_region_end = m_alias_region_start + alias_region_size;
-    m_heap_region_start = alloc_start + heap_rnd;
-    m_heap_region_end = m_heap_region_start + heap_region_size;
-    if (alias_rnd <= heap_rnd) {
-        m_heap_region_start += alias_region_size;
-        m_heap_region_end += alias_region_size;
-    } else {
-        m_alias_region_start += heap_region_size;
-        m_alias_region_end += heap_region_size;
-    }
-    /* Setup stack region. */
-    if (stack_region_size) {
-        m_stack_region_start = alloc_start + stack_rnd;
-        m_stack_region_end = m_stack_region_start + stack_region_size;
-        if (alias_rnd < stack_rnd) {
-            m_stack_region_start += alias_region_size;
-            m_stack_region_end += alias_region_size;
-        } else {
-            m_alias_region_start += stack_region_size;
-            m_alias_region_end += stack_region_size;
-        }
-        if (heap_rnd < stack_rnd) {
-            m_stack_region_start += heap_region_size;
-            m_stack_region_end += heap_region_size;
-        } else {
-            m_heap_region_start += stack_region_size;
-            m_heap_region_end += stack_region_size;
-        }
-    }
-    /* Setup kernel map region. */
-    if (kernel_map_region_size) {
-        m_kernel_map_region_start = alloc_start + kmap_rnd;
-        m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
-        if (alias_rnd < kmap_rnd) {
-            m_kernel_map_region_start += alias_region_size;
-            m_kernel_map_region_end += alias_region_size;
-        } else {
-            m_alias_region_start += kernel_map_region_size;
-            m_alias_region_end += kernel_map_region_size;
-        }
-        if (heap_rnd < kmap_rnd) {
-            m_kernel_map_region_start += heap_region_size;
-            m_kernel_map_region_end += heap_region_size;
-        } else {
-            m_heap_region_start += kernel_map_region_size;
-            m_heap_region_end += kernel_map_region_size;
-        }
-        if (stack_region_size) {
-            if (stack_rnd < kmap_rnd) {
-                m_kernel_map_region_start += stack_region_size;
-                m_kernel_map_region_end += stack_region_size;
-            } else {
-                m_stack_region_start += kernel_map_region_size;
-                m_stack_region_end += kernel_map_region_size;
-            }
-        }
-    }
+    /* Set up our undetermined regions. */
+    {
+        /* Declare helper structure for layout process. */
+        struct RegionLayoutInfo {
+            size_t size;
+            RegionType type;
+            s32 alloc_index; /* 0 for before process code, 1 for after process code */
+        };
+        /* Create region layout info array, and add regions to it. */
+        RegionLayoutInfo region_layouts[RegionType_Count] = {};
+        size_t num_regions = 0;
+        if (kernel_map_region_size > 0) { region_layouts[num_regions++] = { .size = kernel_map_region_size, .type = RegionType_KernelMap, .alloc_index = 0, }; }
+        if (stack_region_size > 0) { region_layouts[num_regions++] = { .size = stack_region_size, .type = RegionType_Stack, .alloc_index = 0, }; }
+        region_layouts[num_regions++] = { .size = alias_region_size, .type = RegionType_Alias, .alloc_index = 0, };
+        region_layouts[num_regions++] = { .size = heap_region_size, .type = RegionType_Heap, .alloc_index = 0, };
+        /* Selection-sort the regions by size largest-to-smallest. */
+        for (size_t i = 0; i < num_regions - 1; ++i) {
+            for (size_t j = i + 1; j < num_regions; ++j) {
+                if (region_layouts[i].size < region_layouts[j].size) {
+                    std::swap(region_layouts[i], region_layouts[j]);
+                }
+            }
+        }
+        /* Layout the regions. */
+        constexpr auto AllocIndexCount = 2;
+        KProcessAddress alloc_starts[AllocIndexCount] = { before_process_code_start, after_process_code_start };
+        size_t alloc_sizes[AllocIndexCount] = { before_process_code_size, after_process_code_size };
+        size_t alloc_counts[AllocIndexCount] = {};
+        for (size_t i = 0; i < num_regions; ++i) {
+            /* Get reference to the current region. */
+            auto &cur_region = region_layouts[i];
+            /* Determine where the current region should go. */
+            cur_region.alloc_index = alloc_sizes[1] >= alloc_sizes[0] ? 1 : 0;
+            ++alloc_counts[cur_region.alloc_index];
+            /* Check that the current region can fit. */
+            R_UNLESS(alloc_sizes[cur_region.alloc_index] >= cur_region.size, svc::ResultOutOfMemory());
+            /* Update our remaining size tracking. */
+            alloc_sizes[cur_region.alloc_index] -= cur_region.size;
+        }
+        /* Selection sort the regions to coalesce them by alloc index. */
+        for (size_t i = 0; i < num_regions - 1; ++i) {
+            for (size_t j = i + 1; j < num_regions; ++j) {
+                if (region_layouts[i].alloc_index > region_layouts[j].alloc_index) {
+                    std::swap(region_layouts[i], region_layouts[j]);
+                }
+            }
+        }
+        /* Layout the regions for each alloc index. */
+        for (auto cur_alloc_index = 0; cur_alloc_index < AllocIndexCount; ++cur_alloc_index) {
+            /* If there are no regions to place, continue. */
+            const size_t cur_alloc_count = alloc_counts[cur_alloc_index];
+            if (cur_alloc_count == 0) {
+                continue;
+            }
+            /* Determine the starting region index for the current alloc index. */
+            size_t cur_region_index = 0;
+            for (size_t i = 0; i < num_regions; ++i) {
+                if (region_layouts[i].alloc_index == cur_alloc_index) {
+                    cur_region_index = i;
+                    break;
+                }
+            }
+            /* If aslr is enabled, randomize the current region order. Otherwise, sort by type. */
+            if (m_enable_aslr) {
+                for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
+                    std::swap(region_layouts[cur_region_index + i], region_layouts[cur_region_index + KSystemControl::GenerateRandomRange(i, cur_alloc_count - 1)]);
+                }
+            } else {
+                for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
+                    for (size_t j = i + 1; j < cur_alloc_count; ++j) {
+                        if (region_layouts[cur_region_index + i].type > region_layouts[cur_region_index + j].type) {
+                            std::swap(region_layouts[cur_region_index + i], region_layouts[cur_region_index + j]);
+                        }
+                    }
+                }
+            }
+            /* Determine aslr offsets for the current space. */
+            size_t aslr_offsets[RegionType_Count] = {};
+            if (m_enable_aslr) {
+                /* Generate the aslr offsets. */
+                for (size_t i = 0; i < cur_alloc_count; ++i) {
+                    aslr_offsets[i] = KSystemControl::GenerateRandomRange(0, alloc_sizes[cur_alloc_index] / RegionAlignment) * RegionAlignment;
+                }
+                /* Sort the aslr offsets. */
+                for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
+                    for (size_t j = i + 1; j < cur_alloc_count; ++j) {
+                        if (aslr_offsets[i] > aslr_offsets[j]) {
+                            std::swap(aslr_offsets[i], aslr_offsets[j]);
+                        }
+                    }
+                }
+            }
+            /* Calculate final region positions. */
+            KProcessAddress prev_region_end = alloc_starts[cur_alloc_index];
+            size_t prev_aslr_offset = 0;
+            for (size_t i = 0; i < cur_alloc_count; ++i) {
+                /* Get the current region. */
+                auto &cur_region = region_layouts[cur_region_index + i];
+                /* Set the current region start/end. */
+                m_region_starts[cur_region.type] = (aslr_offsets[i] - prev_aslr_offset) + GetInteger(prev_region_end);
+                m_region_ends[cur_region.type] = m_region_starts[cur_region.type] + cur_region.size;
+                /* Update tracking variables. */
+                prev_region_end = m_region_ends[cur_region.type];
+                prev_aslr_offset = aslr_offsets[i];
+            }
+        }
/* Declare helpers to check that regions are inside our address space. */
const KProcessAddress process_code_last = process_code_end - 1;
auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return m_address_space_start <= addr && addr <= m_address_space_end; };
/* Ensure that the KernelMap region is valid. */
for (size_t k = 0; k < num_regions; ++k) {
if (const auto &kmap_region = region_layouts[k]; kmap_region.type == RegionType_KernelMap) {
/* If there's no kmap region, we have nothing to check. */
if (kmap_region.size == 0) {
break;
}
/* Check that the kmap region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_KernelMap]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_KernelMap]));
/* Check for overlap with process code. */
const KProcessAddress kmap_start = m_region_starts[RegionType_KernelMap];
const KProcessAddress kmap_last = m_region_ends[RegionType_KernelMap] - 1;
MESOSPHERE_ABORT_UNLESS(kernel_map_region_size == 0 || kmap_last < process_code_start || process_code_last < kmap_start);
/* Check for overlap with stack. */
for (size_t s = 0; s < num_regions; ++s) {
if (const auto &stack_region = region_layouts[s]; stack_region.type == RegionType_Stack) {
if (stack_region.size != 0) {
const KProcessAddress stack_start = m_region_starts[RegionType_Stack];
const KProcessAddress stack_last = m_region_ends[RegionType_Stack] - 1;
MESOSPHERE_ABORT_UNLESS((kernel_map_region_size == 0 && stack_region_size == 0) || kmap_last < stack_start || stack_last < kmap_start);
}
break;
}
}
/* Check for overlap with alias. */
for (size_t a = 0; a < num_regions; ++a) {
if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
if (alias_region.size != 0) {
const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
const KProcessAddress alias_last = m_region_ends[RegionType_Alias] - 1;
MESOSPHERE_ABORT_UNLESS(kmap_last < alias_start || alias_last < kmap_start);
}
break;
}
}
/* Check for overlap with heap. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
if (heap_region.size != 0) {
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(kmap_last < heap_start || heap_last < kmap_start);
}
break;
}
}
}
}
/* Check that the Stack region is valid. */
for (size_t s = 0; s < num_regions; ++s) {
if (const auto &stack_region = region_layouts[s]; stack_region.type == RegionType_Stack) {
/* If there's no stack region, we have nothing to check. */
if (stack_region.size == 0) {
break;
}
/* Check that the stack region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Stack]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Stack]));
/* Check for overlap with process code. */
const KProcessAddress stack_start = m_region_starts[RegionType_Stack];
const KProcessAddress stack_last = m_region_ends[RegionType_Stack] - 1;
MESOSPHERE_ABORT_UNLESS(stack_region_size == 0 || stack_last < process_code_start || process_code_last < stack_start);
/* Check for overlap with alias. */
for (size_t a = 0; a < num_regions; ++a) {
if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
if (alias_region.size != 0) {
const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
const KProcessAddress alias_last = m_region_ends[RegionType_Alias] - 1;
MESOSPHERE_ABORT_UNLESS(stack_last < alias_start || alias_last < stack_start);
}
break;
}
}
/* Check for overlap with heap. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
if (heap_region.size != 0) {
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(stack_last < heap_start || heap_last < stack_start);
}
break;
}
}
}
}
/* Check that the Alias region is valid. */
for (size_t a = 0; a < num_regions; ++a) {
if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
/* If there's no alias region, we have nothing to check. */
if (alias_region.size == 0) {
break;
}
/* Check that the alias region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Alias]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Alias]));
/* Check for overlap with process code. */
const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
const KProcessAddress alias_last = m_region_ends[RegionType_Alias] - 1;
MESOSPHERE_ABORT_UNLESS(alias_last < process_code_start || process_code_last < alias_start);
/* Check for overlap with heap. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
if (heap_region.size != 0) {
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(alias_last < heap_start || heap_last < alias_start);
}
break;
}
}
}
}
/* Check that the Heap region is valid. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
/* If there's no heap region, we have nothing to check. */
if (heap_region.size == 0) {
break;
}
/* Check that the heap region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Heap]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Heap]));
/* Check for overlap with process code. */
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(heap_last < process_code_start || process_code_last < heap_start);
}
}
}
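
The replacement layout algorithm, in short: regions are sorted largest-first and greedily assigned to whichever gap (before or after process code) has more room; within a gap, one random RegionAlignment-multiple offset is drawn per region, the offsets are sorted ascending, and each region starts at the previous region's end plus the delta between consecutive offsets, so the total slack consumed never exceeds the largest draw. A standalone sketch of that placement step, assuming the offsets were already generated in range:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    struct Region { size_t size; uintptr_t start; };

    void Layout(Region *regions, size_t count, uintptr_t alloc_start, size_t *offsets) {
        /* Sorting the offsets guarantees every inter-region gap is non-negative. */
        std::sort(offsets, offsets + count);

        uintptr_t prev_end = alloc_start;
        size_t prev_offset = 0;
        for (size_t i = 0; i < count; ++i) {
            /* Advance by the *delta* between consecutive offsets, not the offset itself. */
            regions[i].start = prev_end + (offsets[i] - prev_offset);
            prev_end = regions[i].start + regions[i].size;
            prev_offset = offsets[i];
        }
    }

With ASLR disabled all offsets are zero and the regions pack back-to-back from the start of the gap, matching the sorted-by-type path above.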
/* Set heap and fill members. */
-    m_current_heap_end = m_heap_region_start;
+    m_current_heap_end = m_region_starts[RegionType_Heap];
m_max_heap_size = 0;
m_mapped_physical_memory_size = 0;
m_mapped_unsafe_physical_memory = 0;
@ -309,32 +517,6 @@ namespace ams::kern {
/* Set allocation option. */
m_allocate_option = KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction_FromBack : KMemoryManager::Direction_FromFront);
-    /* Ensure that we regions inside our address space. */
-    auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return m_address_space_start <= addr && addr <= m_address_space_end; };
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_start));
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_end));
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_start));
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_end));
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_start));
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_end));
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_start));
-    MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_end));
-    /* Ensure that we selected regions that don't overlap. */
-    const KProcessAddress alias_start = m_alias_region_start;
-    const KProcessAddress alias_last = m_alias_region_end - 1;
-    const KProcessAddress heap_start = m_heap_region_start;
-    const KProcessAddress heap_last = m_heap_region_end - 1;
-    const KProcessAddress stack_start = m_stack_region_start;
-    const KProcessAddress stack_last = m_stack_region_end - 1;
-    const KProcessAddress kmap_start = m_kernel_map_region_start;
-    const KProcessAddress kmap_last = m_kernel_map_region_end - 1;
-    MESOSPHERE_ABORT_UNLESS(alias_last < heap_start || heap_last < alias_start);
-    MESOSPHERE_ABORT_UNLESS(alias_last < stack_start || stack_last < alias_start);
-    MESOSPHERE_ABORT_UNLESS(alias_last < kmap_start || kmap_last < alias_start);
-    MESOSPHERE_ABORT_UNLESS(heap_last < stack_start || stack_last < heap_start);
-    MESOSPHERE_ABORT_UNLESS(heap_last < kmap_start || kmap_last < heap_start);
/* Initialize our implementation. */
m_impl.InitializeForProcess(table, GetInteger(start), GetInteger(end));
@ -374,16 +556,16 @@ namespace ams::kern {
case ams::svc::MemoryState_Kernel:
return m_address_space_start;
case ams::svc::MemoryState_Normal:
-            return m_heap_region_start;
+            return m_region_starts[RegionType_Heap];
         case ams::svc::MemoryState_Ipc:
         case ams::svc::MemoryState_NonSecureIpc:
         case ams::svc::MemoryState_NonDeviceIpc:
-            return m_alias_region_start;
+            return m_region_starts[RegionType_Alias];
         case ams::svc::MemoryState_Stack:
-            return m_stack_region_start;
+            return m_region_starts[RegionType_Stack];
         case ams::svc::MemoryState_Static:
         case ams::svc::MemoryState_ThreadLocal:
-            return m_kernel_map_region_start;
+            return m_region_starts[RegionType_KernelMap];
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
@ -409,16 +591,16 @@ namespace ams::kern {
case ams::svc::MemoryState_Kernel:
return m_address_space_end - m_address_space_start;
case ams::svc::MemoryState_Normal:
-            return m_heap_region_end - m_heap_region_start;
+            return m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap];
         case ams::svc::MemoryState_Ipc:
         case ams::svc::MemoryState_NonSecureIpc:
         case ams::svc::MemoryState_NonDeviceIpc:
-            return m_alias_region_end - m_alias_region_start;
+            return m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias];
         case ams::svc::MemoryState_Stack:
-            return m_stack_region_end - m_stack_region_start;
+            return m_region_ends[RegionType_Stack] - m_region_starts[RegionType_Stack];
         case ams::svc::MemoryState_Static:
         case ams::svc::MemoryState_ThreadLocal:
-            return m_kernel_map_region_end - m_kernel_map_region_start;
+            return m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap];
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
@ -446,8 +628,8 @@ namespace ams::kern {
const size_t region_size = this->GetRegionSize(state);
const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1;
-    const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || m_heap_region_start == m_heap_region_end);
-    const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || m_alias_region_start == m_alias_region_end);
+    const bool is_in_heap = !(end <= m_region_starts[RegionType_Heap] || m_region_ends[RegionType_Heap] <= addr || m_region_starts[RegionType_Heap] == m_region_ends[RegionType_Heap]);
+    const bool is_in_alias = !(end <= m_region_starts[RegionType_Alias] || m_region_ends[RegionType_Alias] <= addr || m_region_starts[RegionType_Alias] == m_region_ends[RegionType_Alias]);
switch (state) {
case ams::svc::MemoryState_Free:
case ams::svc::MemoryState_Kernel:
@ -734,7 +916,7 @@ namespace ams::kern {
/* Begin traversal. */
TraversalContext context;
-    TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
+    TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
bool cur_valid = false;
TraversalEntry next_entry;
bool next_valid;
@ -1084,7 +1266,7 @@ namespace ams::kern {
/* Allocate pages for the insecure memory. */
KPageGroup pg(m_block_info_manager);
-    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
+    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, 1, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
/* Close the opened pages when we're done with them. */
/* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
@ -1237,14 +1419,14 @@ namespace ams::kern {
return this->GetSize(KMemoryState_AliasCodeData);
}
-Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, KMemoryPermission perm) {
+Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties &properties) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Create a page group to hold the pages we allocate. */
KPageGroup pg(m_block_info_manager);
/* Allocate the pages. */
-    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, m_allocate_option));
/* Ensure that the page group is closed when we're done working with it. */
ON_SCOPE_EXIT { pg.Close(); };
@ -1255,7 +1437,6 @@ namespace ams::kern {
}
/* Map the pages. */
-    const KPageProperties properties = { perm, false, false, DisableMergeAttribute_None };
R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false));
}
@ -1506,11 +1687,12 @@ namespace ams::kern {
/* Begin a traversal. */
TraversalContext context;
-    TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
+    TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), svc::ResultInvalidCurrentMemory());
/* Traverse until we have enough size or we aren't contiguous any more. */
const KPhysicalAddress phys_address = cur_entry.phys_addr;
const u8 entry_attr = cur_entry.attr;
size_t contig_size;
for (contig_size = cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); contig_size < size; contig_size += cur_entry.block_size) {
if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
@ -1519,6 +1701,9 @@ namespace ams::kern {
if (cur_entry.phys_addr != phys_address + contig_size) {
break;
}
if (cur_entry.attr != entry_attr) {
break;
}
}
/* Take the minimum size for our region. */
@ -1532,7 +1717,7 @@ namespace ams::kern {
}
/* The memory is contiguous, so set the output range. */
-    out->Set(phys_address, size, is_heap);
+    out->Set(phys_address, size, is_heap, attr);
R_SUCCEED();
}
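
Traversal entries now carry the software attribute, and a physically contiguous range is additionally broken whenever that attribute changes, not just when the physical address stops being sequential. Illustratively (self-contained stand-in types, mirroring the loop above):

    #include <cstdint>

    struct Entry { uint64_t phys_addr; uint8_t attr; };

    /* A run continues only while both the physical address and the attribute match. */
    bool ExtendsRun(const Entry &next, uint64_t run_base, uint64_t run_size, uint8_t run_attr) {
        return next.phys_addr == run_base + run_size && next.attr == run_attr;
    }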
@ -1692,17 +1877,17 @@ namespace ams::kern {
KScopedLightLock lk(m_general_lock);
/* Validate that setting heap size is possible at all. */
-    R_UNLESS(!m_is_kernel, svc::ResultOutOfMemory());
-    R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), svc::ResultOutOfMemory());
-    R_UNLESS(size <= m_max_heap_size, svc::ResultOutOfMemory());
+    R_UNLESS(!m_is_kernel, svc::ResultOutOfMemory());
+    R_UNLESS(size <= static_cast<size_t>(m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap]), svc::ResultOutOfMemory());
+    R_UNLESS(size <= m_max_heap_size, svc::ResultOutOfMemory());
-    if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+    if (size < static_cast<size_t>(m_current_heap_end - m_region_starts[RegionType_Heap])) {
         /* The size being requested is less than the current size, so we need to free the end of the heap. */
         /* Validate memory state. */
         size_t num_allocator_blocks;
         R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                     m_heap_region_start + size, (m_current_heap_end - m_heap_region_start) - size,
+                                     m_region_starts[RegionType_Heap] + size, (m_current_heap_end - m_region_starts[RegionType_Heap]) - size,
KMemoryState_All, KMemoryState_Normal,
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
KMemoryAttribute_All, KMemoryAttribute_None));
@ -1716,30 +1901,30 @@ namespace ams::kern {
KScopedPageTableUpdater updater(this);
/* Unmap the end of the heap. */
-        const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
+        const size_t num_pages = ((m_current_heap_end - m_region_starts[RegionType_Heap]) - size) / PageSize;
         const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
-        R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
+        R_TRY(this->Operate(updater.GetPageList(), m_region_starts[RegionType_Heap] + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
         /* Release the memory from the resource limit. */
         m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, num_pages * PageSize);
         /* Apply the memory block update. */
-        m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None);
+        m_memory_block_manager.Update(std::addressof(allocator), m_region_starts[RegionType_Heap] + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None);
         /* Update the current heap end. */
-        m_current_heap_end = m_heap_region_start + size;
+        m_current_heap_end = m_region_starts[RegionType_Heap] + size;
         /* Set the output. */
-        *out = m_heap_region_start;
+        *out = m_region_starts[RegionType_Heap];
         R_SUCCEED();
-    } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+    } else if (size == static_cast<size_t>(m_current_heap_end - m_region_starts[RegionType_Heap])) {
         /* The size requested is exactly the current size. */
-        *out = m_heap_region_start;
+        *out = m_region_starts[RegionType_Heap];
R_SUCCEED();
} else {
/* We have to allocate memory. Determine how much to allocate and where while the table is locked. */
cur_address = m_current_heap_end;
-        allocation_size = size - (m_current_heap_end - m_heap_region_start);
+        allocation_size = size - (m_current_heap_end - m_region_starts[RegionType_Heap]);
}
}
@ -1749,7 +1934,7 @@ namespace ams::kern {
/* Allocate pages for the heap extension. */
KPageGroup pg(m_block_info_manager);
-    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, m_allocate_option));
+    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, 1, m_allocate_option));
/* Close the opened pages when we're done with them. */
/* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
@ -1782,20 +1967,20 @@ namespace ams::kern {
/* Map the pages. */
const size_t num_pages = allocation_size / PageSize;
-    const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (m_current_heap_end == m_heap_region_start) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
+    const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (m_current_heap_end == m_region_starts[RegionType_Heap]) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
     R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false));
     /* We succeeded, so commit our memory reservation. */
     memory_reservation.Commit();
     /* Apply the memory block update. */
-    m_memory_block_manager.Update(std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
+    m_memory_block_manager.Update(std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, m_region_starts[RegionType_Heap] == m_current_heap_end ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
     /* Update the current heap end. */
-    m_current_heap_end = m_heap_region_start + size;
+    m_current_heap_end = m_region_starts[RegionType_Heap] + size;
     /* Set the output. */
-    *out = m_heap_region_start;
+    *out = m_region_starts[RegionType_Heap];
R_SUCCEED();
}
}
@ -1927,8 +2112,8 @@ namespace ams::kern {
const KPhysicalAddress last = phys_addr + size - 1;
/* Get region extents. */
-    const KProcessAddress region_start = m_kernel_map_region_start;
-    const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start;
+    const KProcessAddress region_start = m_region_starts[RegionType_KernelMap];
+    const size_t region_size = m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap];
const size_t region_num_pages = region_size / PageSize;
MESOSPHERE_ASSERT(this->CanContain(region_start, region_size, state));
@ -2237,11 +2422,11 @@ namespace ams::kern {
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
+    const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
     if (is_pa_valid) {
-        const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
         R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
     } else {
-        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
+        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, properties));
}
/* Update the blocks. */
@ -2273,7 +2458,8 @@ namespace ams::kern {
KScopedPageTableUpdater updater(this);
/* Map the pages. */
-    R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+    const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
+    R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, properties));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
@ -2812,7 +2998,7 @@ namespace ams::kern {
MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));
/* Determine the current read size. */
-    const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));
+    const size_t cur_size = std::min<size_t>(last_address - address + 1, PageSize - (GetInteger(address) & (PageSize - 1)));
/* Read. */
R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
@ -2848,7 +3034,7 @@ namespace ams::kern {
MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));
/* Determine the current read size. */
-    const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));
+    const size_t cur_size = std::min<size_t>(last_address - address + 1, PageSize - (GetInteger(address) & (PageSize - 1)));
/* Read. */
R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
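
The two cur_size forms are equivalent: both compute the bytes remaining to the end of the current page, and the mask form simply avoids the add-then-align-down. For address 0x1234 with 4 KiB pages, both give 0xDCC:

    #include <cstdint>

    constexpr uint64_t PageSize = 0x1000;
    constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v & ~(a - 1); }

    /* Old form: align the next page boundary down, then subtract the address.
     * New form: page size minus the offset within the page. */
    static_assert(AlignDown(0x1234 + PageSize, PageSize) - 0x1234
               == PageSize - (0x1234 & (PageSize - 1)));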
@ -3720,8 +3906,8 @@ namespace ams::kern {
MESOSPHERE_ASSERT(src_page_table.IsLockedByCurrentThread());
/* Check that we can theoretically map. */
-    const KProcessAddress region_start = m_alias_region_start;
-    const size_t region_size = m_alias_region_end - m_alias_region_start;
+    const KProcessAddress region_start = m_region_starts[RegionType_Alias];
+    const size_t region_size = m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias];
R_UNLESS(size < region_size, svc::ResultOutOfAddressSpace());
/* Get aligned source extents. */
@ -4662,7 +4848,7 @@ namespace ams::kern {
/* Allocate the new memory. */
const size_t num_pages = size / PageSize;
-    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
+    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
/* Close the page group when we're done with it. */
ON_SCOPE_EXIT { pg.Close(); };

View file

@ -298,10 +298,8 @@ namespace ams::kern {
/* Setup page table. */
{
-    const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
-    const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
-    const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
-    R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
+    const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
+    R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
@ -379,10 +377,8 @@ namespace ams::kern {
/* Setup page table. */
{
-    const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
-    const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
-    const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
-    R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, m_system_resource, res_limit));
+    const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
+    R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };

View file

@ -37,7 +37,7 @@ namespace ams::kern {
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate the memory. */
-    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, owner->GetAllocateOption()));
+    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, 1, owner->GetAllocateOption()));
/* Commit our reservation. */
memory_reservation.Commit();

View file

@ -39,17 +39,18 @@ namespace ams::kern {
KPhysicalAddress KSystemControlBase::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
-    if (intended_dram_size * 2 < real_dram_size) {
+    if (intended_dram_size * 2 <= real_dram_size) {
return base_address;
} else {
return base_address + ((real_dram_size - intended_dram_size) / 2);
}
}
-void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
+void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out, KPhysicalAddress kern_base_address) {
*out = {
-        .address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - InitialProcessBinarySizeMax,
-        ._08 = 0,
+        .address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - InitialProcessBinarySizeMax,
+        ._08 = 0,
+        .kern_address = GetInteger(kern_base_address),
};
}
@ -77,7 +78,7 @@ namespace ams::kern {
void KSystemControlBase::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
#if defined(ATMOSPHERE_ARCH_ARM64)
-    MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
+    MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0>(core_id, entrypoint, arg)) == 0);
#else
AMS_INFINITE_LOOP();
#endif
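
The <= fix matters exactly when the intended memory size is half of the physical DRAM (the half-of-true-size memory config noted in the changelog): with 8 GiB real and 4 GiB intended, the old < took the centering branch and offset the base by 2 GiB, while the new <= keeps the base unchanged. A self-contained check of both branches:

    #include <cstdint>

    constexpr uint64_t GiB = 1ull << 30;

    constexpr uint64_t KernelBase(uint64_t base, uint64_t real, uint64_t intended) {
        /* Mirrors the fixed condition above. */
        return (intended * 2 <= real) ? base : base + (real - intended) / 2;
    }

    static_assert(KernelBase(0x80000000, 8 * GiB, 4 * GiB) == 0x80000000);       /* half of true size: keep base */
    static_assert(KernelBase(0x80000000, 6 * GiB, 4 * GiB) == 0x80000000 + GiB); /* otherwise: center in the excess */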

View file

@ -106,6 +106,9 @@ namespace ams::kern::svc {
*out = 0;
}
break;
case ams::svc::InfoType_AliasRegionExtraSize:
*out = process->GetPageTable().GetAliasRegionExtraSize();
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
@ -134,6 +137,7 @@ namespace ams::kern::svc {
case ams::svc::InfoType_UsedNonSystemMemorySize:
case ams::svc::InfoType_IsApplication:
case ams::svc::InfoType_FreeThreadCount:
case ams::svc::InfoType_AliasRegionExtraSize:
{
/* These info types don't support non-zero subtypes. */
R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination());
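
A hypothetical userland query for the new info type, assuming ams-style svc bindings; the subtype must be zero per the validation above:

    uint64_t extra_size = 0;
    R_TRY(svc::GetInfo(std::addressof(extra_size), svc::InfoType_AliasRegionExtraSize, process_handle, 0));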

View file

@ -162,6 +162,18 @@ namespace ams::kern::svc {
/* Check that the number of extra resource pages is >= 0. */
R_UNLESS(params.system_resource_num_pages >= 0, svc::ResultInvalidSize());
/* Validate that the alias region extra size is allowed, if enabled. */
if (params.flags & ams::svc::CreateProcessFlag_EnableAliasRegionExtraSize) {
/* Check that we have a 64-bit address space. */
R_UNLESS((params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace64Bit, svc::ResultInvalidState());
/* Check that the system resource page count is non-zero. */
R_UNLESS(params.system_resource_num_pages > 0, svc::ResultInvalidState());
/* Check that debug mode is enabled. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultInvalidState());
}
/* Convert to sizes. */
const size_t code_num_pages = params.code_num_pages;
const size_t system_resource_num_pages = params.system_resource_num_pages;
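
Taken together, the new checks mean CreateProcessFlag_EnableAliasRegionExtraSize is accepted only for a 64-bit address space, with a non-zero system resource page count, on debug-mode systems. A sketch of a parameter set that would pass — field names assumed to match ams::svc::CreateProcessParameter:

    /* Illustrative only. */
    params.flags |= ams::svc::CreateProcessFlag_AddressSpace64Bit
                 |  ams::svc::CreateProcessFlag_EnableAliasRegionExtraSize;
    params.system_resource_num_pages = 0x100; /* must be > 0, or svc::ResultInvalidState() */
    /* KTargetSystem::IsDebugMode() must also hold for the request to be accepted. */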

View file

@ -34,7 +34,6 @@ namespace ams::erpt {
enum CategoryId {
AMS_ERPT_FOREACH_CATEGORY(GENERATE_ENUM)
-    CategoryId_Count,
};
#undef GENERATE_ENUM
@ -43,7 +42,6 @@ namespace ams::erpt {
enum FieldId {
AMS_ERPT_FOREACH_FIELD(GENERATE_ENUM)
-    FieldId_Count,
};
#undef GENERATE_ENUM

View file

@ -58,34 +58,88 @@ namespace ams::erpt::srv {
};
#undef STRINGIZE_HANDLER
-#define GET_FIELD_CATEGORY(FIELD, ID, CATEGORY, TYPE, FLAG) [FieldId_##FIELD] = CategoryId_##CATEGORY,
-constexpr inline const CategoryId FieldToCategoryMap[] = {
+#define GET_FIELD_CATEGORY(FIELD, ID, CATEGORY, TYPE, FLAG) CategoryId_##CATEGORY,
+constexpr inline const CategoryId FieldIndexToCategoryMap[] = {
AMS_ERPT_FOREACH_FIELD(GET_FIELD_CATEGORY)
};
#undef GET_FIELD_CATEGORY
-#define GET_FIELD_TYPE(FIELD, ID, CATEGORY, TYPE, FLAG) [FieldId_##FIELD] = TYPE,
-constexpr inline const FieldType FieldToTypeMap[] = {
+#define GET_FIELD_TYPE(FIELD, ID, CATEGORY, TYPE, FLAG) TYPE,
+constexpr inline const FieldType FieldIndexToTypeMap[] = {
AMS_ERPT_FOREACH_FIELD(GET_FIELD_TYPE)
};
#undef GET_FIELD_TYPE
-#define GET_FIELD_FLAG(FIELD, ID, CATEGORY, TYPE, FLAG) [FieldId_##FIELD] = FLAG,
-constexpr inline const FieldFlag FieldToFlagMap[] = {
+#define GET_FIELD_FLAG(FIELD, ID, CATEGORY, TYPE, FLAG) FLAG,
+constexpr inline const FieldFlag FieldIndexToFlagMap[] = {
AMS_ERPT_FOREACH_FIELD(GET_FIELD_FLAG)
};
#undef GET_FIELD_FLAG
-inline CategoryId ConvertFieldToCategory(FieldId id) {
-    return FieldToCategoryMap[id];
-}
-inline FieldType ConvertFieldToType(FieldId id) {
-    return FieldToTypeMap[id];
-}
-inline FieldFlag ConvertFieldToFlag(FieldId id) {
-    return FieldToFlagMap[id];
-}
+#define GET_FIELD_ID(FIELD, ...) FieldId_##FIELD,
+constexpr inline const FieldId FieldIndexToFieldIdMap[] = {
+    AMS_ERPT_FOREACH_FIELD(GET_FIELD_ID)
+};
+#undef GET_FIELD_ID
+#define GET_CATEGORY_ID(CATEGORY, ...) CategoryId_##CATEGORY,
+constexpr inline const CategoryId CategoryIndexToCategoryIdMap[] = {
+    AMS_ERPT_FOREACH_CATEGORY(GET_CATEGORY_ID)
+};
+#undef GET_CATEGORY_ID
+constexpr util::optional<size_t> FindFieldIndex(FieldId id) {
+    if (std::is_constant_evaluated()) {
+        for (size_t i = 0; i < util::size(FieldIndexToFieldIdMap); ++i) {
+            if (FieldIndexToFieldIdMap[i] == id) {
+                return i;
+            }
+        }
+        return util::nullopt;
+    } else {
+        if (const auto it = std::lower_bound(std::begin(FieldIndexToFieldIdMap), std::end(FieldIndexToFieldIdMap), id); it != std::end(FieldIndexToFieldIdMap) && *it == id) {
+            return std::distance(FieldIndexToFieldIdMap, it);
+        } else {
+            return util::nullopt;
+        }
+    }
+}
+constexpr util::optional<size_t> FindCategoryIndex(CategoryId id) {
+    if (std::is_constant_evaluated()) {
+        for (size_t i = 0; i < util::size(CategoryIndexToCategoryIdMap); ++i) {
+            if (CategoryIndexToCategoryIdMap[i] == id) {
+                return i;
+            }
+        }
+        return util::nullopt;
+    } else {
+        if (const auto it = std::lower_bound(std::begin(CategoryIndexToCategoryIdMap), std::end(CategoryIndexToCategoryIdMap), id); it != std::end(CategoryIndexToCategoryIdMap) && *it == id) {
+            return std::distance(CategoryIndexToCategoryIdMap, it);
+        } else {
+            return util::nullopt;
+        }
+    }
+}
+constexpr inline CategoryId ConvertFieldToCategory(FieldId id) {
+    const auto index = FindFieldIndex(id);
+    AMS_ASSERT(index.has_value());
+    return FieldIndexToCategoryMap[index.value()];
+}
+constexpr inline FieldType ConvertFieldToType(FieldId id) {
+    const auto index = FindFieldIndex(id);
+    AMS_ASSERT(index.has_value());
+    return FieldIndexToTypeMap[index.value()];
+}
+constexpr inline FieldFlag ConvertFieldToFlag(FieldId id) {
+    const auto index = FindFieldIndex(id);
+    AMS_ASSERT(index.has_value());
+    return FieldIndexToFlagMap[index.value()];
+}
constexpr inline ReportFlagSet MakeNoReportFlags() {
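The runtime branch of FindFieldIndex/FindCategoryIndex relies on std::lower_bound, which silently misbehaves if the generated id arrays are not ascending in declaration order. A hedged sketch of a compile-time guard that could sit next to the maps above (std::is_sorted is constexpr as of C++20):

#include <algorithm>
#include <iterator>

/* Binary-search precondition: ids must ascend in declaration order. */
static_assert(std::is_sorted(std::begin(FieldIndexToFieldIdMap), std::end(FieldIndexToFieldIdMap)));
static_assert(std::is_sorted(std::begin(CategoryIndexToCategoryIdMap), std::end(CategoryIndexToCategoryIdMap)));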


@ -81,6 +81,8 @@ namespace ams::hos {
Version_16_0_3 = ::ams::TargetFirmware_16_0_3,
Version_16_1_0 = ::ams::TargetFirmware_16_1_0,
Version_17_0_0 = ::ams::TargetFirmware_17_0_0,
Version_17_0_1 = ::ams::TargetFirmware_17_0_1,
Version_18_0_0 = ::ams::TargetFirmware_18_0_0,
Version_Current = ::ams::TargetFirmware_Current,


@ -70,6 +70,7 @@ namespace ams::spl::impl {
Result ModularExponentiateWithDrmDeviceCertKey(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size);
Result PrepareEsArchiveKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation);
Result LoadPreparedAesKey(s32 keyslot, const AccessKey &access_key);
Result PrepareEsUnknown2Key(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation);
/* FS */
Result DecryptAndStoreGcKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option);


@ -28,6 +28,7 @@
AMS_SF_METHOD_INFO(C, H, 28, Result, DecryptAndStoreDrmDeviceCertKey, (const sf::InPointerBuffer &src, spl::AccessKey access_key, spl::KeySource key_source), (src, access_key, key_source), hos::Version_5_0_0) \
AMS_SF_METHOD_INFO(C, H, 29, Result, ModularExponentiateWithDrmDeviceCertKey, (const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod), (out, base, mod), hos::Version_5_0_0) \
AMS_SF_METHOD_INFO(C, H, 31, Result, PrepareEsArchiveKey, (sf::Out<spl::AccessKey> out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation), (out_access_key, base, mod, label_digest, generation), hos::Version_6_0_0) \
AMS_SF_METHOD_INFO(C, H, 32, Result, LoadPreparedAesKey, (s32 keyslot, spl::AccessKey access_key), (keyslot, access_key), hos::Version_6_0_0)
AMS_SF_METHOD_INFO(C, H, 32, Result, LoadPreparedAesKey, (s32 keyslot, spl::AccessKey access_key), (keyslot, access_key), hos::Version_6_0_0) \
AMS_SF_METHOD_INFO(C, H, 33, Result, PrepareEsUnknown2Key, (sf::Out<spl::AccessKey> out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation), (out_access_key, base, mod, label_digest, generation), hos::Version_18_0_0)
AMS_SF_DEFINE_INTERFACE_WITH_BASE(ams::spl::impl, IEsInterface, ::ams::spl::impl::IDeviceUniqueDataInterface, AMS_SPL_I_ES_INTERFACE_INTERFACE_INFO, 0x346D5001)


@ -69,13 +69,13 @@ namespace ams::erpt::srv {
auto guard = SCOPE_GUARD { m_ctx.field_count = 0; };
R_UNLESS(m_ctx.field_count <= FieldsPerContext, erpt::ResultInvalidArgument());
R_UNLESS(0 <= m_ctx.category && m_ctx.category < CategoryId_Count, erpt::ResultInvalidArgument());
R_UNLESS(m_ctx.field_count <= FieldsPerContext, erpt::ResultInvalidArgument());
R_UNLESS(FindCategoryIndex(m_ctx.category).has_value(), erpt::ResultInvalidArgument());
for (u32 i = 0; i < m_ctx.field_count; i++) {
m_ctx.fields[i] = ctx_ptr->fields[i];
R_UNLESS(0 <= m_ctx.fields[i].id && m_ctx.fields[i].id < FieldId_Count, erpt::ResultInvalidArgument());
R_UNLESS(FindFieldIndex(m_ctx.fields[i].id).has_value(), erpt::ResultInvalidArgument());
R_UNLESS(0 <= m_ctx.fields[i].type && m_ctx.fields[i].type < FieldType_Count, erpt::ResultInvalidArgument());
R_UNLESS(m_ctx.fields[i].type == ConvertFieldToType(m_ctx.fields[i].id), erpt::ResultFieldTypeMismatch());


@ -62,7 +62,10 @@ namespace ams::erpt::srv {
static Result AddId(Report *report, FieldId field_id) {
static_assert(MaxFieldStringSize < ElementSize_256);
R_TRY(AddStringValue(report, FieldString[field_id], strnlen(FieldString[field_id], MaxFieldStringSize)));
const auto index = FindFieldIndex(field_id);
AMS_ASSERT(index.has_value());
R_TRY(AddStringValue(report, FieldString[index.value()], strnlen(FieldString[index.value()], MaxFieldStringSize)));
R_SUCCEED();
}


@ -105,8 +105,8 @@ namespace ams::erpt::srv {
g_sf_allocator.Attach(g_heap_handle);
for (auto i = 0; i < CategoryId_Count; i++) {
Context *ctx = new Context(static_cast<CategoryId>(i));
for (const auto category_id : CategoryIndexToCategoryIdMap) {
Context *ctx = new Context(category_id);
AMS_ABORT_UNLESS(ctx != nullptr);
}


@ -277,7 +277,7 @@ namespace ams::erpt::srv {
void SaveSyslogReportIfRequired(const ContextEntry *ctx, const ReportId &report_id) {
bool needs_save_syslog = true;
for (u32 i = 0; i < ctx->field_count; i++) {
static_assert(FieldToTypeMap[FieldId_HasSyslogFlag] == FieldType_Bool);
static_assert(FieldIndexToTypeMap[*FindFieldIndex(FieldId_HasSyslogFlag)] == FieldType_Bool);
if (ctx->fields[i].id == FieldId_HasSyslogFlag && !ctx->fields[i].value_bool) {
needs_save_syslog = false;
break;


@ -21,7 +21,7 @@ namespace ams::fs::impl {
#define ADD_ENUM_CASE(v) case v: return #v
template<> const char *IdString::ToString<pkg1::KeyGeneration>(pkg1::KeyGeneration id) {
static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_17_0_0);
static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_18_0_0);
switch (id) {
using enum pkg1::KeyGeneration;
case KeyGeneration_1_0_0: return "1.0.0-2.3.0";
@ -40,7 +40,8 @@ namespace ams::fs::impl {
case KeyGeneration_14_0_0: return "14.0.0-14.1.2";
case KeyGeneration_15_0_0: return "15.0.0-15.0.1";
case KeyGeneration_16_0_0: return "16.0.0-16.0.3";
case KeyGeneration_17_0_0: return "17.0.0-";
case KeyGeneration_17_0_0: return "17.0.0-17.0.1";
case KeyGeneration_18_0_0: return "18.0.0-";
default: return "Unknown";
}
}


@ -21,11 +21,12 @@ namespace ams::os::impl {
class VammManagerHorizonImpl {
public:
static void GetReservedRegionImpl(uintptr_t *out_start, uintptr_t *out_size) {
u64 start, size;
R_ABORT_UNLESS(svc::GetInfo(std::addressof(start), svc::InfoType_AliasRegionAddress, svc::PseudoHandle::CurrentProcess, 0));
R_ABORT_UNLESS(svc::GetInfo(std::addressof(size), svc::InfoType_AliasRegionSize, svc::PseudoHandle::CurrentProcess, 0));
u64 start, size, extra_size;
R_ABORT_UNLESS(svc::GetInfo(std::addressof(start), svc::InfoType_AliasRegionAddress, svc::PseudoHandle::CurrentProcess, 0));
R_ABORT_UNLESS(svc::GetInfo(std::addressof(size), svc::InfoType_AliasRegionSize, svc::PseudoHandle::CurrentProcess, 0));
R_ABORT_UNLESS(svc::GetInfo(std::addressof(extra_size), svc::InfoType_AliasRegionExtraSize, svc::PseudoHandle::CurrentProcess, 0));
*out_start = start;
*out_size = size;
*out_size = size - extra_size;
}
static Result AllocatePhysicalMemoryImpl(uintptr_t address, size_t size) {


@ -893,6 +893,10 @@ namespace ams::spl::impl {
R_RETURN(PrepareEsDeviceUniqueKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, smc::EsDeviceUniqueKeyType::ArchiveKey, generation));
}
Result PrepareEsUnknown2Key(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation) {
R_RETURN(PrepareEsDeviceUniqueKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, smc::EsDeviceUniqueKeyType::Unknown2, generation));
}
/* FS */
Result DecryptAndStoreGcKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) {
R_RETURN(DecryptAndStoreDeviceUniqueKey(src, src_size, access_key, key_source, option));


@ -16,11 +16,11 @@
#pragma once
#define ATMOSPHERE_RELEASE_VERSION_MAJOR 1
#define ATMOSPHERE_RELEASE_VERSION_MINOR 6
#define ATMOSPHERE_RELEASE_VERSION_MICRO 2
#define ATMOSPHERE_RELEASE_VERSION_MINOR 7
#define ATMOSPHERE_RELEASE_VERSION_MICRO 0
#define ATMOSPHERE_RELEASE_VERSION ATMOSPHERE_RELEASE_VERSION_MAJOR, ATMOSPHERE_RELEASE_VERSION_MINOR, ATMOSPHERE_RELEASE_VERSION_MICRO
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 17
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 18
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 0
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO 0


@ -79,8 +79,10 @@
#define ATMOSPHERE_TARGET_FIRMWARE_16_0_3 ATMOSPHERE_TARGET_FIRMWARE(16, 0, 3)
#define ATMOSPHERE_TARGET_FIRMWARE_16_1_0 ATMOSPHERE_TARGET_FIRMWARE(16, 1, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_17_0_0 ATMOSPHERE_TARGET_FIRMWARE(17, 0, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_17_0_1 ATMOSPHERE_TARGET_FIRMWARE(17, 0, 1)
#define ATMOSPHERE_TARGET_FIRMWARE_18_0_0 ATMOSPHERE_TARGET_FIRMWARE(18, 0, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_17_0_0
#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_18_0_0
#define ATMOSPHERE_TARGET_FIRMWARE_MIN ATMOSPHERE_TARGET_FIRMWARE(0, 0, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_MAX ATMOSPHERE_TARGET_FIRMWARE_CURRENT
@ -152,6 +154,8 @@ namespace ams {
TargetFirmware_16_0_3 = ATMOSPHERE_TARGET_FIRMWARE_16_0_3,
TargetFirmware_16_1_0 = ATMOSPHERE_TARGET_FIRMWARE_16_1_0,
TargetFirmware_17_0_0 = ATMOSPHERE_TARGET_FIRMWARE_17_0_0,
TargetFirmware_17_0_1 = ATMOSPHERE_TARGET_FIRMWARE_17_0_1,
TargetFirmware_18_0_0 = ATMOSPHERE_TARGET_FIRMWARE_18_0_0,
TargetFirmware_Current = ATMOSPHERE_TARGET_FIRMWARE_CURRENT,


@ -190,6 +190,7 @@ namespace ams::svc {
InfoType_ThreadTickCount = 25,
InfoType_IsSvcPermitted = 26,
InfoType_IoRegionHint = 27,
InfoType_AliasRegionExtraSize = 28,
InfoType_MesosphereMeta = 65000,
InfoType_MesosphereCurrentProcess = 65001,
@ -436,15 +437,19 @@ namespace ams::svc {
/* 11.x+ DisableDeviceAddressSpaceMerge. */
CreateProcessFlag_DisableDeviceAddressSpaceMerge = (1 << 12),
/* 18.x EnableAliasRegionExtraSize. */
CreateProcessFlag_EnableAliasRegionExtraSize = (1 << 13),
/* Mask of all flags. */
CreateProcessFlag_All = CreateProcessFlag_Is64Bit |
CreateProcessFlag_AddressSpaceMask |
CreateProcessFlag_EnableDebug |
CreateProcessFlag_EnableAslr |
CreateProcessFlag_IsApplication |
CreateProcessFlag_PoolPartitionMask |
CreateProcessFlag_OptimizeMemoryAllocation |
CreateProcessFlag_DisableDeviceAddressSpaceMerge,
CreateProcessFlag_All = CreateProcessFlag_Is64Bit |
CreateProcessFlag_AddressSpaceMask |
CreateProcessFlag_EnableDebug |
CreateProcessFlag_EnableAslr |
CreateProcessFlag_IsApplication |
CreateProcessFlag_PoolPartitionMask |
CreateProcessFlag_OptimizeMemoryAllocation |
CreateProcessFlag_DisableDeviceAddressSpaceMerge |
CreateProcessFlag_EnableAliasRegionExtraSize,
};
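Taken together with the svc::CreateProcess validation earlier in this diff, the new bit is only accepted for a 64-bit address space with a non-zero system resource size on debug-mode units. A self-contained sketch of composing such flags (bit values mirrored from this enum and the public SVC documentation; treat them as illustrative):

#include <cstdint>

/* Mirrored flag bits (only the ones used here). */
constexpr uint32_t Flag_Is64Bit                    = 1u << 0;
constexpr uint32_t Flag_AddressSpace64Bit          = 3u << 1;
constexpr uint32_t Flag_EnableAslr                 = 1u << 5;
constexpr uint32_t Flag_EnableAliasRegionExtraSize = 1u << 13; /* 18.x+ */

/* Also requires system_resource_num_pages > 0 and debug mode (see above). */
constexpr uint32_t g_create_flags = Flag_Is64Bit | Flag_AddressSpace64Bit
                                  | Flag_EnableAslr | Flag_EnableAliasRegionExtraSize;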
/* Debug types. */


@ -57,8 +57,8 @@ namespace ams::svc {
/* This is the highest SVC version supported by Atmosphere, to be updated on new kernel releases. */
/* NOTE: Official kernel versions have SVC major = SDK major + 4, SVC minor = SDK minor. */
constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(17);
constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion( 5);
constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(18);
constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion( 3);
constexpr inline u32 SupportedKernelVersion = EncodeKernelVersion(SupportedKernelMajorVersion, SupportedKernelMinorVersion);
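Per the note above (SVC major = SDK major + 4, SVC minor = SDK minor), firmware 18.3 advertises SVC version 22.3. A sketch of the arithmetic, assuming the conventional packing with the minor version in the low 4 bits — helper names here are illustrative, not the vapours definitions:

#include <cstdint>

constexpr uint32_t ToSvcMajor(uint32_t sdk_major) { return sdk_major + 4; } /* per the note above */
constexpr uint32_t ToSvcMinor(uint32_t sdk_minor) { return sdk_minor; }
/* Assumption: the minor version occupies the low 4 bits of the packed value. */
constexpr uint32_t EncodeVersion(uint32_t major, uint32_t minor) { return (major << 4) | minor; }

static_assert(EncodeVersion(ToSvcMajor(18), ToSvcMinor(3)) == 0x163);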


@ -345,7 +345,7 @@ namespace ams::kern::init {
MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size);
/* Setup the slab region. */
const KPhysicalAddress code_start_phys_addr = init_pt.GetPhysicalAddressOfRandomizedRange(code_start_virt_addr, code_region_size);
const KPhysicalAddress code_start_phys_addr = g_phase2_initial_process_binary_meta.layout.kern_address;
const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + code_region_size;
const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;


@ -195,7 +195,7 @@ namespace ams::kern::init::loader {
/* Setup the INI1 header in memory for the kernel. */
{
/* Get the kernel layout. */
KSystemControl::Init::GetInitialProcessBinaryLayout(std::addressof(g_initial_process_binary_meta.layout));
KSystemControl::Init::GetInitialProcessBinaryLayout(std::addressof(g_initial_process_binary_meta.layout), base_address);
/* If there's no desired base address, use the ini in place. */
if (g_initial_process_binary_meta.layout.address == 0) {


@ -1232,6 +1232,7 @@ namespace ams::dmnt {
}
if (reply_cur != send_buffer) {
AMS_DMNT2_GDB_LOG_DEBUG("ProcessDebugEvents: %s\n", send_buffer);
bool do_break;
this->SendPacket(std::addressof(do_break), send_buffer);
if (do_break) {
@ -1322,6 +1323,8 @@ namespace ams::dmnt {
/* Clear our reply packet. */
reply[0] = 0;
bool should_log_result = true;
/* Handle the received packet. */
switch (m_receive_packet[0]) {
case 'D':
@ -1358,9 +1361,11 @@ namespace ams::dmnt {
if (!this->g()) {
m_killed = true;
}
should_log_result = false;
break;
case 'm':
this->m();
should_log_result = false;
break;
case 'p':
this->p();
@ -1381,9 +1386,10 @@ namespace ams::dmnt {
this->QuestionMark();
break;
default:
AMS_DMNT2_GDB_LOG_DEBUG("Not Implemented: %s\n", m_receive_packet);
AMS_DMNT2_GDB_LOG_ERROR("Not Implemented: %s\n", m_receive_packet);
break;
}
AMS_DMNT2_GDB_LOG_DEBUG("Reply: %s\n", should_log_result ? reply : "[...]");
}
void GdbServerImpl::D() {
@ -1401,6 +1407,7 @@ namespace ams::dmnt {
/* Get thread context. */
svc::ThreadContext ctx;
if (R_FAILED(m_debug_process.GetThreadContext(std::addressof(ctx), thread_id, svc::ThreadContextFlag_All))) {
AMS_DMNT2_GDB_LOG_ERROR("Failed to get thread context\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1412,6 +1419,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.SetThreadContext(std::addressof(ctx), thread_id, svc::ThreadContextFlag_All))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to set thread context\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1421,9 +1429,11 @@ namespace ams::dmnt {
if (ParsePrefix(m_receive_packet, "Hg") || ParsePrefix(m_receive_packet, "HG")) {
this->Hg();
} else {
AMS_DMNT2_GDB_LOG_ERROR("'H'-command not implemented: %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
} else {
AMS_DMNT2_GDB_LOG_ERROR("Cannot use 'H'-command without DebugProcess\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1458,6 +1468,7 @@ namespace ams::dmnt {
if (success) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("'Hg'-command failed\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1468,6 +1479,7 @@ namespace ams::dmnt {
/* Validate format. */
char *comma = std::strchr(m_receive_packet, ',');
if (comma == nullptr) {
AMS_DMNT2_GDB_LOG_ERROR("'M' command formatted incorrectly (no ','): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1475,6 +1487,7 @@ namespace ams::dmnt {
char *colon = std::strchr(comma + 1, ':');
if (colon == nullptr) {
AMS_DMNT2_GDB_LOG_ERROR("'M' command formatted incorrectly (no ':'): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1484,6 +1497,7 @@ namespace ams::dmnt {
const u64 address = DecodeHex(m_receive_packet);
const u64 length = DecodeHex(comma + 1);
if (length >= sizeof(m_buffer)) {
AMS_DMNT2_GDB_LOG_ERROR("Length exceeded buffer size: %ld >= %ld\n", length, sizeof(m_buffer));
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1495,6 +1509,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.WriteMemory(m_buffer, address, length))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to write memory\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
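The 'M' handler accepts packets of the form M<addr>,<len>:<hex-bytes> and replies E01 on any framing error, as the added logging makes explicit. A standalone sketch of that validation and decode (the helper is illustrative, not the dmnt implementation):

#include <cstdint>
#include <cstdlib>
#include <cstring>

/* Parse "M<addr>,<len>:<payload>"; false mirrors the E01 replies above. */
inline bool ParseWritePacket(const char *pkt, uint64_t *addr, uint64_t *len, const char **payload) {
    if (*pkt == 'M') { ++pkt; }                       /* skip the command byte */
    const char *comma = std::strchr(pkt, ',');
    if (comma == nullptr) { return false; }           /* no ',' -> malformed   */
    const char *colon = std::strchr(comma + 1, ':');
    if (colon == nullptr) { return false; }           /* no ':' -> malformed   */
    *addr    = std::strtoull(pkt, nullptr, 16);       /* fields are hex        */
    *len     = std::strtoull(comma + 1, nullptr, 16);
    *payload = colon + 1;                             /* hex-encoded bytes     */
    return true;
}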
@ -1505,6 +1520,7 @@ namespace ams::dmnt {
/* Validate format. */
char *equal = std::strchr(m_receive_packet, '=');
if (equal == nullptr) {
AMS_DMNT2_GDB_LOG_ERROR("'P' command formatted incorrectly (no '='): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1530,9 +1546,11 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.SetThreadContext(std::addressof(ctx), thread_id, flags))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to set thread context");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to get thread context");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1541,7 +1559,7 @@ namespace ams::dmnt {
if (ParsePrefix(m_receive_packet, "QStartNoAckMode")) {
this->QStartNoAckMode();
} else {
AMS_DMNT2_GDB_LOG_DEBUG("Not Implemented Q: %s\n", m_receive_packet);
AMS_DMNT2_GDB_LOG_ERROR("Not Implemented Q: %s\n", m_receive_packet);
}
}
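For context, QStartNoAckMode only disables the per-packet '+'/'-' acknowledgements; packets themselves stay framed as $<payload>#<checksum>, where the checksum is the payload bytes summed modulo 256. A minimal sketch:

#include <cstddef>
#include <cstdint>

/* GDB remote serial protocol checksum: sum of payload bytes, mod 256. */
inline uint8_t GdbChecksum(const char *payload, size_t len) {
    uint8_t sum = 0;
    for (size_t i = 0; i < len; ++i) {
        sum = static_cast<uint8_t>(sum + static_cast<uint8_t>(payload[i]));
    }
    return sum; /* rendered as two lowercase hex digits after '#' */
}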
@ -1558,9 +1576,11 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.GetThreadContext(std::addressof(ctx), thread_id, svc::ThreadContextFlag_Control))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AppendReplyFormat(m_reply_cur, m_reply_end, "E01");
AMS_DMNT2_GDB_LOG_ERROR("Failed to get thread context");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
} else {
AMS_DMNT2_GDB_LOG_ERROR("'T' command formatted incorrectly (no '.'): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1571,6 +1591,7 @@ namespace ams::dmnt {
/* Decode the type. */
if (!('0' <= m_receive_packet[0] && m_receive_packet[0] <= '4') || m_receive_packet[1] != ',') {
AMS_DMNT2_GDB_LOG_ERROR("'Z' command formatted incorrectly (not starting with pattern '[0-4],'): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1581,6 +1602,7 @@ namespace ams::dmnt {
/* Decode the address/length. */
const char *comma = std::strchr(m_receive_packet, ',');
if (comma == nullptr) {
AMS_DMNT2_GDB_LOG_ERROR("'Z' command formatted incorrectly (no ','): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1596,6 +1618,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.SetBreakPoint(address, length, false))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to set software breakpoint\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1607,6 +1630,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.SetHardwareBreakPoint(address, length, false))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to set hardware breakpoint\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1618,6 +1642,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.SetWatchPoint(address, length, false, true))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to set watchpoint-W\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1629,6 +1654,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.SetWatchPoint(address, length, true, false))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to set watchpoint-R\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1640,6 +1666,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.SetWatchPoint(address, length, true, true))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to set watchpoint-A\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
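The five Z/z handlers above correspond to the breakpoint kinds defined by the GDB remote serial protocol; for reference:

/* 'Z<type>,<addr>,<kind>' packet types, as dispatched above. */
enum GdbBreakpointType {
    GdbBreakpointType_Software = 0, /* Z0: software breakpoint     */
    GdbBreakpointType_Hardware = 1, /* Z1: hardware breakpoint     */
    GdbBreakpointType_WatchW   = 2, /* Z2: write watchpoint        */
    GdbBreakpointType_WatchR   = 3, /* Z3: read watchpoint         */
    GdbBreakpointType_WatchA   = 4, /* Z4: access watchpoint (r/w) */
};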
@ -1668,6 +1695,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(result)) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to continue thread\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1697,6 +1725,7 @@ namespace ams::dmnt {
/* Validate format. */
const char *comma = std::strchr(m_receive_packet, ',');
if (comma == nullptr) {
AMS_DMNT2_GDB_LOG_ERROR("'m' command formatted incorrectly (no ','): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1705,6 +1734,7 @@ namespace ams::dmnt {
const u64 address = DecodeHex(m_receive_packet);
const u64 length = DecodeHex(comma + 1);
if (length >= sizeof(m_buffer)) {
AMS_DMNT2_GDB_LOG_ERROR("Length exceeded buffer size: %ld >= %ld\n", length, sizeof(m_buffer));
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1712,6 +1742,7 @@ namespace ams::dmnt {
/* Read the memory. */
/* TODO: Detect partial readability? */
if (R_FAILED(m_debug_process.ReadMemory(m_buffer, address, length))) {
AMS_DMNT2_GDB_LOG_ERROR("Failed to read memory\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -1745,6 +1776,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.GetThreadContext(std::addressof(ctx), thread_id, flags))) {
SetGdbRegisterPacket(m_reply_cur, m_reply_end, ctx, reg_num, m_debug_process.Is64Bit());
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to get thread context");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1755,7 +1787,7 @@ namespace ams::dmnt {
} else if (ParsePrefix(m_receive_packet, "vCont")) {
this->vCont();
} else {
AMS_DMNT2_GDB_LOG_DEBUG("Not Implemented v: %s\n", m_receive_packet);
AMS_DMNT2_GDB_LOG_ERROR("Not Implemented v: %s\n", m_receive_packet);
}
}
@ -1780,12 +1812,15 @@ namespace ams::dmnt {
/* Set the stop reply packet. */
this->AppendStopReplyPacket(m_debug_process.GetLastSignal());
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to attach to process\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
} else {
AMS_DMNT2_GDB_LOG_ERROR("Invalid process id: %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
} else {
AMS_DMNT2_GDB_LOG_ERROR("Cannot attach to process while already attached\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1937,7 +1972,7 @@ namespace ams::dmnt {
} else if (ParsePrefix(m_receive_packet, "qXfer:")) {
this->qXfer();
} else {
AMS_DMNT2_GDB_LOG_DEBUG("Not Implemented q: %s\n", m_receive_packet);
AMS_DMNT2_GDB_LOG_ERROR("Not Implemented q: %s\n", m_receive_packet);
}
}
@ -1945,6 +1980,7 @@ namespace ams::dmnt {
if (this->HasDebugProcess()) {
AppendReplyFormat(m_reply_cur, m_reply_end, "1");
} else {
AMS_DMNT2_GDB_LOG_ERROR("Not attached\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -1954,6 +1990,7 @@ namespace ams::dmnt {
/* Send the thread id. */
AppendReplyFormat(m_reply_cur, m_reply_end, "QCp%lx.%lx", m_process_id.value, m_debug_process.GetLastThreadId());
} else {
AMS_DMNT2_GDB_LOG_ERROR("Not attached\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -2161,6 +2198,7 @@ namespace ams::dmnt {
} else {
/* All other qXfer require debug process. */
if (!this->HasDebugProcess()) {
AMS_DMNT2_GDB_LOG_ERROR("Not attached\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -2171,6 +2209,7 @@ namespace ams::dmnt {
} else if (ParsePrefix(m_receive_packet, "threads:read::")) {
if (!this->qXferThreadsRead()) {
m_killed = true;
AMS_DMNT2_GDB_LOG_ERROR("Failed to read threads\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
} else if (ParsePrefix(m_receive_packet, "libraries:read::")) {
@ -2178,7 +2217,7 @@ namespace ams::dmnt {
} else if (ParsePrefix(m_receive_packet, "exec-file:read:")) {
AppendReplyFormat(m_reply_cur, m_reply_end, "l%s", m_debug_process.GetProcessName());
} else {
AMS_DMNT2_GDB_LOG_DEBUG("Not Implemented qxfer: %s\n", m_receive_packet);
AMS_DMNT2_GDB_LOG_ERROR("Not Implemented qxfer: %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -2229,7 +2268,7 @@ namespace ams::dmnt {
m_reply_cur[length] = 0;
m_reply_cur += std::strlen(m_reply_cur);
} else {
AMS_DMNT2_GDB_LOG_DEBUG("Not Implemented qxfer:features:read: %s\n", m_receive_packet);
AMS_DMNT2_GDB_LOG_ERROR("Not Implemented qXfer:features:read: %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -2330,7 +2369,7 @@ namespace ams::dmnt {
/* Copy out the process list. */
GetAnnexBufferContents(m_reply_cur, offset, length);
} else {
AMS_DMNT2_GDB_LOG_DEBUG("Not Implemented qxfer:osdata:read: %s\n", m_receive_packet);
AMS_DMNT2_GDB_LOG_ERROR("Not Implemented qXfer:osdata:read: %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -2426,6 +2465,7 @@ namespace ams::dmnt {
/* Decode the type. */
if (!('0' <= m_receive_packet[0] && m_receive_packet[0] <= '4') || m_receive_packet[1] != ',') {
AMS_DMNT2_GDB_LOG_ERROR("'Z' command formatted incorrectly (not starting with pattern '[0-4],'): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -2436,6 +2476,7 @@ namespace ams::dmnt {
/* Decode the address/length. */
const char *comma = std::strchr(m_receive_packet, ',');
if (comma == nullptr) {
AMS_DMNT2_GDB_LOG_ERROR("'Z' command formatted incorrectly (no ','): %s\n", m_receive_packet);
AppendReplyError(m_reply_cur, m_reply_end, "E01");
return;
}
@ -2450,6 +2491,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.ClearBreakPoint(address, length))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to clear software breakpoint\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -2459,6 +2501,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.ClearHardwareBreakPoint(address, length))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to clear hardware breakpoint\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}
@ -2470,6 +2513,7 @@ namespace ams::dmnt {
if (R_SUCCEEDED(m_debug_process.ClearWatchPoint(address, length))) {
AppendReplyOk(m_reply_cur, m_reply_end);
} else {
AMS_DMNT2_GDB_LOG_ERROR("Failed to clear watchpoint\n");
AppendReplyError(m_reply_cur, m_reply_end, "E01");
}
}


@ -59,6 +59,11 @@ constexpr inline const EmbeddedPatchEntry Usb30ForceEnablePatches_17_0_0[] = {
{ 0x71EC, "\x20\x00\x80\x52\xC0\x03\x5F\xD6", 8 },
};
constexpr inline const EmbeddedPatchEntry Usb30ForceEnablePatches_18_0_0[] = {
{ 0x6DCC, "\x20\x00\x80\x52\xC0\x03\x5F\xD6", 8 },
{ 0x6E48, "\x20\x00\x80\x52\xC0\x03\x5F\xD6", 8 },
};
constexpr inline const EmbeddedPatch Usb30ForceEnablePatches[] = {
{ ParseModuleId("C0D3F4E87E8B0FE9BBE9F1968A20767F3DC08E03"), util::size(Usb30ForceEnablePatches_9_0_0), Usb30ForceEnablePatches_9_0_0 },
{ ParseModuleId("B9C700CA8335F8BAA0D2041D8D09F772890BA988"), util::size(Usb30ForceEnablePatches_10_0_0), Usb30ForceEnablePatches_10_0_0 },
@ -70,4 +75,5 @@ constexpr inline const EmbeddedPatch Usb30ForceEnablePatches[] = {
{ ParseModuleId("30B15A83E94D91750E7470795414AD1AE9C6A8DB"), util::size(Usb30ForceEnablePatches_15_0_0), Usb30ForceEnablePatches_15_0_0 }, /* 15.0.0 */
{ ParseModuleId("225865A442B4B66E8BD14B3E9450B901BDF29A40"), util::size(Usb30ForceEnablePatches_16_0_0), Usb30ForceEnablePatches_16_0_0 }, /* 16.0.0 */
{ ParseModuleId("70D4C2ABCD049F16B301186924367F813DA70248"), util::size(Usb30ForceEnablePatches_17_0_0), Usb30ForceEnablePatches_17_0_0 }, /* 17.0.0 */
{ ParseModuleId("4F21AE15E814FA46515C0401BB23D4F7ADCBF3F4"), util::size(Usb30ForceEnablePatches_18_0_0), Usb30ForceEnablePatches_18_0_0 }, /* 18.0.0 */
};
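Each 8-byte payload above decodes to the same two little-endian AArch64 instructions, which replace the body of the patched check with "return 1" (i.e., USB 3.0 force-enabled):

#include <cstdint>

/* "\x20\x00\x80\x52\xC0\x03\x5F\xD6" as two little-endian instruction words. */
constexpr uint32_t usb3_force_enable_insns[] = {
    0x52800020, /* mov w0, #1 */
    0xD65F03C0, /* ret        */
};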


@ -55,6 +55,10 @@ namespace ams::spl {
Result LoadPreparedAesKey(s32 keyslot, AccessKey access_key) {
R_RETURN(m_manager.LoadPreparedAesKey(keyslot, this, access_key));
}
Result PrepareEsUnknown2Key(sf::Out<AccessKey> out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation) {
R_RETURN(m_manager.PrepareEsUnknown2Key(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), generation));
}
};
static_assert(spl::impl::IsIEsInterface<EsService>);


@ -121,6 +121,10 @@ namespace ams::spl {
R_RETURN(impl::PrepareEsArchiveKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, generation));
}
Result SecureMonitorManager::PrepareEsUnknown2Key(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation) {
R_RETURN(impl::PrepareEsUnknown2Key(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, generation));
}
Result SecureMonitorManager::PrepareCommonEsTitleKey(AccessKey *out_access_key, const KeySource &key_source, u32 generation) {
R_RETURN(impl::PrepareCommonEsTitleKey(out_access_key, key_source, generation));
}


@ -52,6 +52,7 @@ namespace ams::spl {
Result LoadEsDeviceKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option);
Result PrepareEsTitleKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation);
Result PrepareEsArchiveKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation);
Result PrepareEsUnknown2Key(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation);
Result PrepareCommonEsTitleKey(AccessKey *out_access_key, const KeySource &key_source, u32 generation);
Result LoadPreparedAesKey(s32 keyslot, const void *owner, const AccessKey &access_key);
Result AllocateAesKeySlot(s32 *out_keyslot, const void *owner);


@ -238,6 +238,7 @@ CATEGORIES = {
139 : 'EthernetAdapterOUIInfo',
140 : 'NANDTypeInfo',
141 : 'MicroSDTypeInfo',
1000 : 'TestNx',
}
FIELD_TYPES = {
@ -408,6 +409,14 @@ def find_flags(full, num_fields, magic_idx):
ind = full.index(KNOWN) - magic_idx
return list(up('<'+'B'*num_fields, full[ind:ind+num_fields]))
def find_id_array(full, num_fields, magic_idx, table_format):
if table_format == 0:
return list(range(num_fields))
else:
KNOWN = pk('<IIIIII', *range(444, 450))
ind = full.index(KNOWN) - 4 * magic_idx
return list(up('<' + 'I'*num_fields, full[ind:ind+4*num_fields]))
def cat_to_string(c):
return CATEGORIES[c] if c in CATEGORIES else 'Category_Unknown%d' % c
@ -430,6 +439,8 @@ def main(argc, argv):
cats = find_categories(full, NUM_FIELDS)
types = find_types(full, NUM_FIELDS)
flags = find_flags(full, NUM_FIELDS, fields.index('TestStringEncrypt') - 1)
ids = find_id_array(full, NUM_FIELDS, fields.index('TestStringEncrypt'), table_format)
assert ids[:4] == [0, 1, 2, 3]
print 'Identified %d fields.' % NUM_FIELDS
mf = max(len(s) for s in fields)
mc = max(len(cat_to_string(c)) for c in cats)
@ -453,8 +464,8 @@ def main(argc, argv):
out.write('\n')
out.write('#define AMS_ERPT_FOREACH_FIELD(HANDLER) \\\n')
for i in xrange(NUM_FIELDS):
f, c, t, l = fields[i], cats[i], types[i], flags[i]
out.write((' HANDLER(%%-%ds %%-4s %%-%ds %%-%ds %%-%ds) \\\n' % (mf+1, mc+1, mt+1, ml)) % (f+',', '%d,'%i, cat_to_string(c)+',', typ_to_string(t)+',', flg_to_string(l)))
f, c, t, l, d = fields[i], cats[i], types[i], flags[i], ids[i]
out.write((' HANDLER(%%-%ds %%-4s %%-%ds %%-%ds %%-%ds) \\\n' % (mf+1, mc+1, mt+1, ml)) % (f+',', '%d,'%d, cat_to_string(c)+',', typ_to_string(t)+',', flg_to_string(l)))
out.write('\n')
return 0