Compare commits

...

159 commits

Author SHA1 Message Date
Michael Scire 05fde7b764 ams: may as well test removal of CRTP from Result 2024-06-01 22:36:37 -07:00
Michael Scire 1609f804f2 ams: build with -std=gnu++23 2024-06-01 22:36:37 -07:00
Michael Scire f35c94810c ams: fix compilation with gcc 14 (closes #2330) 2024-06-01 22:36:37 -07:00
Michael Scire 548b48b2a6 loader: update to reflect latest 18.0.0 changes
well, this sure is late, whoops
2024-05-27 15:43:58 -07:00
Michael Scire 35d93a7c41 git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "fadec2981"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "fadec2981"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2024-03-29 03:20:12 -07:00
Michael Scire 410f23035e docs: update changelog for 1.7.0 2024-03-29 03:19:17 -07:00
Michael Scire 29cc13543a kern: fix using memory config for half-of-true-size 2024-03-29 03:18:20 -07:00
Michael Scire 31ad4eec1d git subrepo push emummc
subrepo:
  subdir:   "emummc"
  merged:   "832b24426"
upstream:
  origin:   "https://github.com/m4xw/emummc"
  branch:   "develop"
  commit:   "832b24426"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2024-03-29 02:57:53 -07:00
Michael Scire 3ccb0ae02b git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "8b85add71"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "8b85add71"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2024-03-29 02:56:10 -07:00
Michael Scire 4f7db6e60e docs: add changelog for 1.7.0 2024-03-29 02:54:40 -07:00
Michael Scire a325e18cb5 loader: add usb3 patches for 18.0.0 2024-03-29 02:41:14 -07:00
Michael Scire af41272591 spl: add support for new spl:es command 33 2024-03-29 02:41:14 -07:00
Michael Scire 551821e7e2 erpt: actually support non-sequential ids, nintendo why 2024-03-29 02:41:14 -07:00
Michael Scire b081762657 emummc: update for 18.0.0 2024-03-29 02:41:14 -07:00
Michael Scire d2c2a94c5e erpt: add new IDs/categories 2024-03-29 02:41:14 -07:00
Michael Scire 4ff9278d11 jpegdec: stop bundling (TODO post-prerelease) 2024-03-29 02:41:14 -07:00
Michael Scire 21c85c6a4f exo/fusee: apparently 18.0.0 did not burn a fuse 2024-03-29 02:41:14 -07:00
Michael Scire 05090005b7 svc: advertise support for 18.3.0.0 2024-03-29 02:41:14 -07:00
Michael Scire c0487ad384 kern: fix whoops in new page table logic 2024-03-29 02:41:14 -07:00
Michael Scire ecbe5cd406 kern: refactor smc helpers to share more common logic 2024-03-29 02:41:14 -07:00
Michael Scire 4fe139ea52 kern: return ExceptionType_UnalignedData on data abort caused by alignment fault 2024-03-29 02:41:14 -07:00
Michael Scire 6922eae3e7 kern: add KPageGroup::CopyRangeTo 2024-03-29 02:41:14 -07:00
Michael Scire 952188fc73 kern: implement new attr tracking for memory range/traversal context 2024-03-29 02:41:14 -07:00
Michael Scire c0a4fc30a8 kern: simplify size calculations in KPageTableBase::Read/WriteDebugIoMemory 2024-03-29 02:41:14 -07:00
Michael Scire 0b04c89a84 kern: pass properties directly to KPageTableBase::AllocateAndMapPagesImpl 2024-03-29 02:41:14 -07:00
Michael Scire 217dd1260a kern: take alignment argument in KMemoryManager::AllocateAndOpen 2024-03-29 02:41:14 -07:00
Michael Scire 8aa62a54d8 kern/os: support CreateProcessFlag_EnableAliasRegionExtraSize 2024-03-29 02:41:14 -07:00
Michael Scire 25bae14064 kern: revise KPageTableBase region layout logic to match 18.0.0 changes 2024-03-29 02:41:14 -07:00
Michael Scire 900913fe3b kern: fix longstanding bug in ConvertToKMemoryPermission 2024-03-29 02:41:14 -07:00
Michael Scire 7562f807fd kern: pass kernel base from KernelLdr to Kernel 2024-03-29 02:41:14 -07:00
Michael Scire cf5895e04f kern: use userspace access instructions to read from tlr 2024-03-29 02:41:14 -07:00
Michael Scire 1f37fbed1d fusee/exo/ams: update with new keydata/version enums 2024-03-29 02:41:14 -07:00
JerryWn 9701d5b2ab readme: fix broken discord icon 2024-03-20 03:04:29 -07:00
Michael Scire c8c76bf8f8 readme: I suppose we're living in 2024, then 2024-03-04 14:51:44 -07:00
Michael Scire 0220f67085 fssrv: fix dumb assert error 2024-02-23 13:55:51 -07:00
german77 615f8a3ef3 dmnt: Fix debug log for cheats 2024-02-14 10:28:16 -07:00
Liam 000e382c42 dmnt.gen2: avoid data abort when too many breakpoints are created 2024-02-14 10:27:45 -07:00
Liam 3627356d4b dmnt.gen2: enable gdbserver QStartNoAckMode 2024-02-14 10:27:36 -07:00
Michael Scire 72b0fe6c1c strat: fix nx_debug build target 2024-01-20 13:31:12 -07:00
Michael Scire e919b80fa2 pm: improve resource limit management accuracy
This change was made for #2255, but the issue creator never confirmed if it resolved the issue.

This *does* better reflect what Nintendo's pm does, though, so I'm going to commit it regardless.
2024-01-20 13:09:04 -07:00
Michael Scire fc16f28d0c settings: support PortugueseBr (closes #2264) 2024-01-08 12:20:53 -07:00
Michael Scire e09ba765a1 kern: fix various comment/style hygiene issues (thanks @liamwhite) 2023-12-27 23:24:35 -07:00
Michael Scire 3217df147e kern: allow ktrace map capability when ktrace is disabled 2023-12-27 23:17:52 -07:00
Michael Scire 1fa41c3e2a loader/ro: abort if patching would go out of bounds 2023-12-27 23:05:10 -07:00
Michael Scire db3dc4ebd2 git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "bfc558348"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "bfc558348"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-11-28 13:02:32 -07:00
Michael Scire 742fd16080 sf: fix ipc serialization bug (out object id offsets) 2023-11-28 12:54:00 -07:00
Michael Scire 812b2aeb4c git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "d7a02b6ca"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "d7a02b6ca"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-11-16 16:25:52 -07:00
Michael Scire 46a4357882 fusee: remove ips patch parsing from sd filesystem
Parsing the SD fs is very slow. In addition, the only KIPs are either a) atmosphere modules, or b) FS.

The IPS subsystem was originally designed to make nogc/etc patches work for FS,
but these are now internal, and it appears that the literal only kip patches
that exist are for piracy.

It just doesn't make sense to slow down boot for every normal user for a feature
that has no actual usecase, and especially when fusee is already so minimal.
2023-11-13 12:45:00 -07:00
Michael Scire 872c18c501 kern: fix some comment typos 2023-11-01 10:25:31 -07:00
Michael Scire afc0e14556 kern/svc: fix query/insecure names 2023-11-01 10:24:13 -07:00
Michael Scire f7bf379cfe git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "80bf6aeee"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "80bf6aeee"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-10-27 16:22:45 -07:00
Michael Scire 9f26419b1a ams: bump version to 1.6.2, add changelog 2023-10-27 16:21:57 -07:00
Michael Scire 1b057d48c6 sm: fix compat with new service macros 2023-10-26 14:44:45 -07:00
Michael Scire 0c3afff4d3 pm: update to reflect 17.0.0 internal design changes 2023-10-26 14:44:32 -07:00
Michael Scire 274f6b63f2 erpt: add remaining SubmitFsInfo helpers 2023-10-25 16:08:12 -07:00
Michael Scire 2ed8450446 erpt: SubmitFileSystemProxyErrorInfo 2023-10-25 14:21:27 -07:00
Michael Scire 60974a5f4e erpt: GetMmcErrorInfo, GetSdCard*Info 2023-10-25 12:41:18 -07:00
Michael Scire fa384fd920 erpt: begin SubmitFsinfo (SubmitMmcDetailInfo) 2023-10-25 04:45:41 -07:00
Michael Scire 3f19db0d96 jpegdec: fix abort check on output width 2023-10-18 02:33:59 -07:00
Michael Scire a84f725e21 jpegdec: update to reflect 17.0.0 changes 2023-10-18 02:31:26 -07:00
Michael Scire 7f61dfdb8d pm: since 15.0.0, WaitApplicationMemoryAvailable is more lenient 2023-10-17 11:25:35 -07:00
Michael Scire c44da84869 pm: adjust resource limit function names 2023-10-17 11:10:09 -07:00
Liam 7f4450f930 haze: pretend to be able to write MTP server code 2023-10-16 17:31:09 -04:00
Michael Scire edb4e2ea56 git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "965e05b3c"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "965e05b3c"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-10-16 12:31:15 -07:00
Michael Scire 183f3e0d7e ams: bump version to 1.6.1 2023-10-16 12:30:35 -07:00
Michael Scire 7650c5eb96 docs: add changelog for 1.6.1 2023-10-16 12:30:18 -07:00
Liam 0ff197b300 haze: fix bMaxPacketSize0 2023-10-16 08:36:33 -07:00
Liam d9fff85bc4 haze: use gpu console for rendering 2023-10-16 08:36:10 -07:00
Liam c866c15856 haze: implement GetObjectPropList 2023-10-16 08:36:10 -07:00
Michael Scire e8ac23e2ee ncm: fix two comments 2023-10-16 08:24:07 -07:00
Michael Scire 3a8cffef57 ncm: better detect + fix 17 brick after-the-fact
This adds detection for missing-save or empty-save, and rebuilds in either case.
2023-10-16 02:38:30 -07:00
Liam 13411902c9 fs: add missing stub for GetProgramId 2023-10-14 07:52:47 -07:00
Michael Scire 693fb423cb kern: fix minor sin 2023-10-12 14:25:17 -07:00
Michael Scire 8a9eb85e05 git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "132558c33"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "132558c33"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-10-12 09:23:31 -07:00
Michael Scire d389ef639e docs: add changelog for 1.6.0 2023-10-12 09:19:26 -07:00
Michael Scire 719858ad18 git subrepo push emummc
subrepo:
  subdir:   "emummc"
  merged:   "9513a5412"
upstream:
  origin:   "https://github.com/m4xw/emummc"
  branch:   "develop"
  commit:   "9513a5412"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-10-12 09:02:30 -07:00
Michael Scire e4d08ae0c5 erpt: amend min-version for latest CreateReportWithAttachments 2023-10-12 08:55:58 -07:00
Michael Scire 0c063db926 loader: add usb3 patches for 17.0.0 2023-10-12 08:55:58 -07:00
Michael Scire 02e987819b ncm: work around change in Nintendo save handling behavior
Static save files do not require an entry in the save data indexer to mount.
Prior to 17.0.0, save data files were considered static if userid was 0.
In 17.0.0+, only 8000000000000000 is static.

However, some users using cfw do not have an entry for 8000000000000120 in the indexer,
for various reasons (but mostly manual nand-restore, I think). Thus, on boot of 17.0.0+,
FS will say 8000000000000120 is not present (not in indexer), and NCM will create it anew.

The 8000000000000120 save will then be empty, and then the firmware can't boot.

To workaround this, logic has been re-enabled on 17.0.0+ for building the content meta database.
Thus, if the user encounters this error, the 8000000000000120 save will be emptied, but then
it will be automatically reconstructed, fixing the problem.
2023-10-12 08:55:58 -07:00
Michael Scire 2ec3e141c7 bpc.mitm/exo: support pmic reboot/shutdown on mariko (thanks @CTCaer) 2023-10-12 08:55:58 -07:00
Michael Scire 71d0274884 erpt: remove deprecated fields, they didn't actually change IDs, just the mapping between id and name table index 2023-10-12 08:55:58 -07:00
Michael Scire 05259b7519 emummc: fix offsets for 17.0.0 2023-10-12 08:55:58 -07:00
Michael Scire 59a24fa646 fusee: support parsing 17.0.0+ INI 2023-10-12 08:55:58 -07:00
Michael Scire f5b2eab4a8 exo: fix up new titlekey option extents 2023-10-12 08:55:58 -07:00
Michael Scire e96e1063e2 jpegdec: stop bundling (TODO post-prerelease) 2023-10-12 08:55:58 -07:00
Michael Scire aa170a72a9 erpt: Add basic (TODO-impl post-prerelease) support for 17.0.0 changes 2023-10-12 08:55:58 -07:00
Michael Scire 9d4cb685a7 fs: update OpenCodeFileSystem abi for 17.0.0 2023-10-12 08:55:58 -07:00
Michael Scire c95741142e ncm: update for new 17.0.0 apis 2023-10-12 08:55:58 -07:00
Michael Scire ef9b111bbf emummc: update for 17.0.0 2023-10-12 08:55:58 -07:00
Michael Scire 114b82284d exo/spl: Add new EsCommonKeyType 2023-10-12 08:55:58 -07:00
Michael Scire c5d7ca5159 fusee/exo: implement the usual changes for new firmware support 2023-10-12 08:55:58 -07:00
Michael Scire 6d0bf70783 kern: fix assert usage in process load 2023-10-12 08:55:58 -07:00
Michael Scire aba6ca7329 kern: bump supported version to 17.x 2023-10-12 08:55:58 -07:00
Michael Scire 06a840e550 kern: fix operation type enum-value whoops 2023-10-12 08:55:58 -07:00
Michael Scire 11c02e22e0 kern: implement support for applying relr relocations 2023-10-12 08:55:58 -07:00
Michael Scire f93aea4c06 kern: split Process/Thread exit to separate WorkerTaskManagers 2023-10-12 08:55:58 -07:00
Michael Scire 4ddfb6183c kern: split out GetInstructionDataUserMode in exception handler 2023-10-12 08:55:58 -07:00
Michael Scire 3737151a2f kern: Add special-case for InvalidateProcessDataCache on current process 2023-10-12 08:55:58 -07:00
Michael Scire 2a4d68f916 kern: KPageTable: remove MapFirst operation, replace with MapFirstGroup 2023-10-12 08:55:58 -07:00
Michael Scire 7b523cfc8d kern: note OnFinalize calls in KPageTable::Finalize 2023-10-12 08:55:58 -07:00
Michael Scire 39a95d4023 kern: implement new default application system resource field in KProcess 2023-10-12 08:55:58 -07:00
Michael Scire 2c5002ce50 kern: update KMemoryRegionType values for new ids + SecureUnknown region 2023-10-12 08:55:58 -07:00
Michael Scire b7384a8667 kern: KSupervisorPageTable now checks wxn instead of setting it 2023-10-12 08:55:58 -07:00
Michael Scire 85b5f20395 kern: KPageTable::Initialize no longer takes unused process id 2023-10-12 08:55:58 -07:00
Michael Scire ad5bd81d3f kern: implement PermissionLock, update KPageTableBase attribute/alignment checks 2023-10-12 08:55:58 -07:00
Michael Scire 777b6d285c kern: KPageTableBase::CheckMemoryState now invokes a helper 2023-10-12 08:55:58 -07:00
Michael Scire ae2c25e9c8 kern: update KMemoryState, remove bijection (separate IoRegister/IoMemory) 2023-10-12 08:55:58 -07:00
Michael Scire 3b8f65d502 kern: update initial process load logic to do per-segment mapping/decompression 2023-10-12 08:55:58 -07:00
Michael Scire cfd2d5b012 kern: clear new pages in init page allocator, not init page table 2023-10-12 08:55:58 -07:00
Michael Scire c72ba35684 kern: add speculation barriers after eret 2023-10-12 08:55:58 -07:00
Michael Scire ec96203cb7 kern: remove unnecessary fields from InitArgs (0x80 -> 0x40) 2023-10-12 08:55:58 -07:00
Michael Scire 1491a7b159 kern: on second thought, move vectors back to end of text 2023-10-12 08:55:58 -07:00
Michael Scire 0daef4a6e8 kern/ldr: move crt0 into .rodata 2023-10-12 08:55:58 -07:00
Michael Scire 4ca3c44e5f kern: pass ini1 size from loader to kernel, remove slab memset from init0 2023-10-12 08:55:58 -07:00
Michael Scire add4b3fdc3 utils: update erpt script 2023-10-12 08:55:58 -07:00
Liam 159f8d384b dmnt.gen2: enable attach to arbitrary program id 2023-10-11 19:50:09 -07:00
Liam 92a8c8eb88 haze: implement android operations 2023-10-11 18:57:49 -07:00
Liam 9e0daff46e haze: split operations by type 2023-10-11 18:57:49 -07:00
Liam 6b72dbd22d haze: refactor constant use for cleaner separation 2023-10-11 18:57:49 -07:00
Michael Scire ba91f070e8 mesosphere: remove nostartfiles from specs files (should only be passed to linker wrapper) 2023-09-17 09:33:56 -07:00
Michael Scire 4fe9a89ab8 docs: commit saddest changelog of all time, this is your brain on going to vacation 2023-08-21 17:30:06 -07:00
Michael Scire de73f6c5bb git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "c3dc418a2"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "c3dc418a2"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-08-21 17:27:16 -07:00
Michael Scire e488b6ee47 ams: add enum recognition for 16.1.0 2023-08-21 17:26:27 -07:00
Michael Scire 99810dc091 fssystem: fix typo error in BucketTree::Visitor::MoveNext 2023-08-15 14:47:09 -07:00
Liam e54957285f libstrat: comment fixes for fssystem 2023-08-12 11:33:29 -07:00
Michael Scire fca213460b libstrat: fix more minor style issues pointed out by Pharynx 2023-08-10 11:48:28 -07:00
Michael Scire 4e6bd19fcd fssystem: fix stupid issue in NodeBuffer move-ctor
Code compiles with this constructor deleted, so it wasn't used, but even so...
2023-08-03 16:58:55 -07:00
Michael Scire 8b88351cb4 mitm: fix errant include on non-hos 2023-06-06 19:32:06 -07:00
Michael Scire 63ea152349 fatal: take good idea from shrek 2023-05-19 11:47:38 -07:00
Michael Scire 3cb54e2b4b git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "f4a966eb4"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "f4a966eb4"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-05-14 03:17:20 -07:00
Michael Scire e9de11a746 ams: bump version to 1.5.4 2023-05-14 03:16:27 -07:00
Michael Scire b979b5aa36 docs: 1.5.4 changelog, zelda why the fuck 2023-05-14 03:16:15 -07:00
SciresM f2ee44da74
romfs/ams.mitm/pm: refactor to dynamically steal heap for certain games. (#2122)
* fs.mitm: skeleton the use of special allocation in romfs build

* pm: add api for ams.mitm to steal application memory

* pm/mitm: okay, that api won't work, try a different one

* romfs: revert memory usage increases; we'll handle torture games case-by-case.

* pm/romfs: first (broken?) pass at dynamic heap.

I cannot wait to figure out all the ways this is wrong.

* Release the dynamic heap a little more eagerly

* romfs: animal crossing is also not a nice game

* romfs: fix issues in close-during-build

* romfs: zelda is a blight upon this earth
2023-05-14 03:06:52 -07:00
Michael Scire 85c23b5781 fusee: actually identify new FS 2023-05-08 18:38:13 -07:00
Michael Scire 8e042f2262 git subrepo push emummc
subrepo:
  subdir:   "emummc"
  merged:   "30205111e"
upstream:
  origin:   "https://github.com/m4xw/emummc"
  branch:   "develop"
  commit:   "30205111e"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-05-08 18:25:25 -07:00
Michael Scire 81e9154a52 emummc: add enums for 16.0.3 2023-05-08 18:24:49 -07:00
Michael Scire e9b9dbc2aa docs: add changelog for 1.5.3 2023-05-08 18:19:31 -07:00
Michael Scire 7e6c849ca4 git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "cd0fc2c1d"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "cd0fc2c1d"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-05-08 18:06:50 -07:00
Michael Scire 8ec7c096d0 git subrepo push emummc
subrepo:
  subdir:   "emummc"
  merged:   "d2fcc73eb"
upstream:
  origin:   "https://github.com/m4xw/emummc"
  branch:   "develop"
  commit:   "d2fcc73eb"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-05-08 18:05:36 -07:00
Michael Scire 6e5b901a9b emummc: support 16.0.3 2023-05-08 18:03:35 -07:00
Michael Scire b800953d66 ams: recognize 16.0.3('s FS) 2023-05-08 17:51:13 -07:00
Michael Scire f0240db75a fs.mitm: mitm glue for font replacement, before I forget 2023-05-08 17:40:10 -07:00
Michael Scire 1f5ec68a5c ams: fix compilation with gcc 13 2023-05-07 03:36:46 -07:00
Michael Scire ed9e60acb9 kern: track heap in KPageTableBase::MemoryRange 2023-04-30 16:50:53 -07:00
Liam a7300b0fa4 haze: fix file size transmission issue 2023-04-18 22:15:48 -07:00
Liam 8e2eca2004 haze: abstract firmware version and serial number fetch 2023-04-18 22:15:48 -07:00
Michael Scire 9f83b3c838 ams: I really need to automate keeping this in sync 2023-04-17 20:57:57 -07:00
Michael Scire 434c8cefc4 git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "ecc8b1811"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "ecc8b1811"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-04-17 20:40:41 -07:00
Michael Scire d8aed7de6d ams: add 16.0.2 enum 2023-04-17 20:40:02 -07:00
Liam a346014dc7 haze: get serial number and firmware version from set:sys 2023-04-17 22:22:28 -04:00
Michael Scire 4b3c801e9f dist: add haze to output zip 2023-04-17 14:35:25 -07:00
Michael Scire 90db1223f6 docs: avoid unintended insinuations 2023-04-17 14:24:27 -07:00
Michael Scire fa64a6ff4d docs: pretend to be literate 2023-04-17 14:20:16 -07:00
Michael Scire 0c6a06a0cf git subrepo push libraries
subrepo:
  subdir:   "libraries"
  merged:   "7340e2eab"
upstream:
  origin:   "https://github.com/Atmosphere-NX/Atmosphere-libs"
  branch:   "master"
  commit:   "7340e2eab"
git-subrepo:
  version:  "0.4.1"
  origin:   "???"
  commit:   "???"
2023-04-17 14:19:59 -07:00
Michael Scire 5efb4a2a98 docs: bump version, 16.0.1 exists too 2023-04-17 14:19:18 -07:00
liamwhite 3b662122f9
troposphere: add haze MTP server (#2095)
* troposphere: add haze MTP server

* haze: adjust banner, new lines between class sections, single-statement if

* haze: remove additional newlines between sections

* haze: avoid use of reference out parameter

* haze: console_main_loop: style

* haze: event_reactor, file_system_proxy, ptp: style

* haze: ptp_data_builder, ptp_object_database, ptp_object_heap, results, usb_session: style

* haze: event_reactor, ptp_data_parser, async_usb_server, ptp_object_database, ptp_object_heap: style

* haze: ptp_responder: style

* haze: usb_session: style

* haze: use svc defs from vapours

* haze: misc comments

* haze: fix pointer overflow check

* haze: fix copy paste error

* haze: use alignment, invalidate cached use values in console

* haze: fix stray hex constant

* haze: misc style

* haze: fixes for windows

* haze: add GetObjectPropsSupported, GetObjectPropDesc, GetObjectPropValue

* haze: add SetObjectPropValue

* haze: improve object database API

* haze: improve WriteVariableLengthData readability

* haze: fix directory renames on windows

* haze: ptp_object_database: fix collation

* haze: event_reactor: fix size validation

* haze: ptp_responder: improve documentation

* haze: ptp_responder: avoid unnecessary fs interaction in GetObjectPropValue

* haze: ptp_responder: fix object deletion on windows

* haze: tear down sessions on suspension

* haze: prefer more specific data types

* haze: misc

* haze: fix usb 3.0 support

* haze: improve host-to-device file transfer performance

* haze: report device serial

* haze: move static_assert to requires, fix alignment
2023-04-17 14:08:05 -07:00
Michael Scire e9b28ab4b1 kern: adjust wording to be more technically correct 2023-03-27 10:54:58 -07:00
Michael Scire 1afb184c14 docs: pretend I know how to type common words 2023-03-14 01:52:06 -07:00
Michael Scire 5e070600a9 docs: add faq.md
"Alexa, what does June 15th mean?"
2023-03-14 01:48:19 -07:00
347 changed files with 12930 additions and 3161 deletions

View file

@ -3,7 +3,7 @@
=====
![License](https://img.shields.io/badge/License-GPLv2-blue.svg)
[![Chat on Discord](https://camo.githubusercontent.com/b4175720ede4f2621aa066ffbabb70ae30044679/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f636861742d446973636f72642d627269676874677265656e2e737667)](https://discordapp.com/invite/ZdqEhed)
[![Chat on Discord](https://img.shields.io/badge/Discord-5865f2?logo=discord&logoColor=white)](https://discordapp.com/invite/ZdqEhed)
Atmosphère is a work-in-progress customized firmware for the Nintendo Switch.
@ -27,7 +27,6 @@ This software is licensed under the terms of the GPLv2, with exemptions for spec
You can find a copy of the license in the [LICENSE file](LICENSE).
Exemptions:
* The [yuzu Nintendo Switch emulator](https://github.com/yuzu-emu/yuzu) and the [Ryujinx Team and Contributors](https://github.com/orgs/Ryujinx) are exempt from GPLv2 licensing. They are permitted, each at their individual discretion, to instead license any source code authored for the Atmosphère project as either GPLv2 or later or the [MIT license](https://github.com/Atmosphere-NX/Atmosphere/blob/master/docs/licensing_exemptions/MIT_LICENSE). In doing so, they may alter, supplement, or entirely remove the copyright notice for each file they choose to relicense. Neither the Atmosphère project nor its individual contributors shall assert their moral rights against any of the aforementioned projects.
* [Nintendo](https://github.com/Nintendo) is exempt from GPLv2 licensing and may (at its option) instead license any source code authored for the Atmosphère project under the Zero-Clause BSD license.
Credits

View file

@ -51,6 +51,8 @@ dist: dist-no-debug
cp $(CURRENT_DIRECTORY)/stratosphere/spl/$(ATMOSPHERE_OUT_DIR)/spl.elf $(DIST_DIR)/spl.elf
cp $(CURRENT_DIRECTORY)/stratosphere/TioServer/$(ATMOSPHERE_OUT_DIR)/TioServer.elf $(DIST_DIR)/TioServer.elf
cp $(CURRENT_DIRECTORY)/troposphere/daybreak/daybreak.elf $(DIST_DIR)/daybreak.elf
cp $(CURRENT_DIRECTORY)/troposphere/haze/haze.elf $(DIST_DIR)/haze.elf
cp $(CURRENT_DIRECTORY)/troposphere/reboot_to_payload/reboot_to_payload.elf $(DIST_DIR)/reboot_to_payload.elf
cd $(DIST_DIR); zip -r ../atmosphere-$(ATMOSPHERE_VERSION)-debug.zip ./*; cd ../;
rm -rf $(DIST_DIR)
@ -82,7 +84,7 @@ dist-no-debug: package3 $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000034
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000036
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000037
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c
#mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000042
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000420
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000b240
@ -96,7 +98,7 @@ dist-no-debug: package3 $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)
cp stratosphere/fatal/$(ATMOSPHERE_OUT_DIR)/fatal.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000034/exefs.nsp
cp stratosphere/creport/$(ATMOSPHERE_OUT_DIR)/creport.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000036/exefs.nsp
cp stratosphere/ro/$(ATMOSPHERE_OUT_DIR)/ro.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000037/exefs.nsp
cp stratosphere/jpegdec/$(ATMOSPHERE_OUT_DIR)/jpegdec.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c/exefs.nsp
#cp stratosphere/jpegdec/$(ATMOSPHERE_OUT_DIR)/jpegdec.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c/exefs.nsp
cp stratosphere/pgl/$(ATMOSPHERE_OUT_DIR)/pgl.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000042/exefs.nsp
cp stratosphere/LogManager/$(ATMOSPHERE_OUT_DIR)/LogManager.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000420/exefs.nsp
cp stratosphere/htc/$(ATMOSPHERE_OUT_DIR)/htc.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000b240/exefs.nsp
@ -106,6 +108,7 @@ dist-no-debug: package3 $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)
rm -r $(DIST_DIR)/stratosphere_romfs
cp troposphere/reboot_to_payload/reboot_to_payload.nro $(DIST_DIR)/switch/reboot_to_payload.nro
cp troposphere/daybreak/daybreak.nro $(DIST_DIR)/switch/daybreak.nro
cp troposphere/haze/haze.nro $(DIST_DIR)/switch/haze.nro
cd $(DIST_DIR); zip -r ../atmosphere-$(ATMOSPHERE_VERSION).zip ./*; cd ../;
rm -rf $(DIST_DIR)
cp fusee/$(ATMOSPHERE_BOOT_OUT_DIR)/fusee.bin $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)/fusee.bin

View file

@ -1,4 +1,90 @@
# Changelog
## 1.7.0
+ Basic support was added for 18.0.0.
+ The console should boot and atmosphère should be fully functional. However, not all modules have been fully updated to reflect the latest changes.
+ There shouldn't be anything user-visible resulting from this, but it will be addressed in a future atmosphère update, once I am not traveling so much.
+ `exosphère` was updated to reflect the latest official secure monitor behavior.
+ `mesosphère` was updated to reflect the latest official kernel behavior.
+ `spl` was updated to reflect the latest official behavior.
+ `fusee` no longer supports applying IPS patches to KIPs.
+ The only KIPs that are ever present are a) atmosphère modules, b) custom system modules, or c) FS.
+ The IPS subsystem was originally designed to make nogc patches work for FS, but these are now internal, and it appears the literal only kip patches that exist are for piracy.
+ I could not find any kip patches posted anywhere made for any other purpose.
+ It fundamentally does not make sense to slow down boot for every normal user for a feature that has no actual use-case, especially when `fusee` seeks to be a minimal bootloader.
+ Minor improvements were made to atmosphere's gdbstub, including:
+ Support was added for QStartNoAckMode.
+ An issue was fixed that could cause a fatal error when creating too many breakpoints.
+ A number of minor issues were fixed and improvements were made, including:
+ `pt-BR` (`PortugueseBr`) is now accepted as a valid language when overriding game locales.
+ A bug was fixed that could cause atmosphere to incorrectly serialize output object IDs over IPC when using domain objects.
+ A bug was fixed in `pm`'s resource limit boost logic that could potentially cause legitimate boosts to fail in certain circumstances.
+ `loader`/`ro` will now throw a fatal error when using invalid IPS patches that go out of bounds, instead of corrupting memory.
+ Support was fixed for booting using a memory configuration of half of the true available memory (e.g. forcing a 4GB configuration on an 8GB board).
+ General system stability improvements to enhance the user's experience.
## 1.6.2
+ Support was finished for 17.0.0.
+ `erpt` was updated to support the latest official behavior.
+ `jpegdec` was updated to support the latest official behavior.
+ `pm` was updated to support the latest official behavior.
+ General system stability improvements to enhance the user's experience.
## 1.6.1
+ An improved solution to [the problem that would cause consoles which had previously re-built their SYSTEM partition to brick on update-to-17.0.0](https://gist.github.com/SciresM/2ddb708c812ed585c4d99f54e25205ff) was added.
+ In particular, booting atmosphère will now automatically detect the problem and unbrick any consoles which have fallen into this state.
+ Some improvements were made to `haze`, including:
+ Performance was greatly improved:
+ Support was added for GetObjectPropList, which decreases the amount of requests made by ~8x.
+ Haze now performs rendering on the GPU, freeing up the CPU to respond to requests in a more timely manner.
+ An issue was fixed with how `haze` configures `bMaxPacketSize0` which improves support for USB3.
+ General system stability improvements to enhance the user's experience.
## 1.6.0
+ Basic support was added for 17.0.0.
+ The console should boot and atmosphère should be fully functional. However, not all modules have been fully updated to reflect the latest changes.
+ There shouldn't be anything user-visible resulting from this, but it will be addressed in a soon-to-come atmosphère update.
+ `exosphère` was updated to reflect the latest official secure monitor behavior.
+ `mesosphère` was updated to reflect the latest official kernel behavior.
+ `ncm` was updated to reflect the latest official behavior.
+ `erpt` was partially updated to support the latest official behavior.
+ Atmosphere's gdbstub now supports waiting to attach to a specific program id on launch (as opposed to any application).
+ The monitor command for this is `monitor wait <hex program id>`, where program id can optionally have an `0x` prefix.
+ Support was added to `haze` for editing files in-place and performing 64-bit transfers (files larger than 4 GB).
+ `bpc.mitm` was enabled on Mariko units, and now triggers pmic-based shutdowns/reboots (thanks @CTCaer).
+ This should cause the console to no longer wake ~15 seconds after shutdown on Mariko.
+ A number of minor issues were fixed and improvements were made, including:
+ A workaround was added for a change in 17.0.0 that would cause consoles which had previously re-built their SYSTEM partition to brick on update-to-17.0.0.
+ General system stability improvements to enhance the user's experience.
## 1.5.5
+ Support was added for 16.1.0.
+ General system stability improvements to enhance the user's experience.
## 1.5.4
+ Experimental new functionality was implemented to prevent crashing when building romfs for certain games with obscene file counts.
+ This includes both Fire Emblem: Engage (~190000 files) and The Legend of Zelda: Tears of the Kingdom (~300000 files).
+ The solution involved adding functionality to ams.mitm/pm to dynamically steal memory from the application (and system) pool as needed when the games have romfs mods.
+ No memory is taken, and there is no cost to this functionality when playing without mods (or with overrides disabled).
+ The Legend of Zelda: Tears of the Kingdom is currently the absolute worst case game, requiring ~48 MB of memory to build a romfs image to play with mods.
+ Right now, the memory is sourced as follows: 32 MB (base ams.mitm heap), 10 MB (stolen from application pool), 8 MB (dynamically stolen from system pool).
+ This is 50 MB, which allows a little overhead in the worst case (prevents crashing due to exhausting the heap for other allocations in ams.mitm).
+ Zelda is remarkably sensitive to memory being stolen from the application pool, tolerating no more than 16 MB on 1.0.0 and 12 MB on 1.1.0. I have chosen to steal 10 MB, to be safe, for now.
+ This may break on a future game update, but I will fix it if and when that happens. There is no perfect solution; the game simply requires too much memory to support mods flawlessly, and I am forced to compromise.
+ As usual, if you encounter a game that exhausts ams.mitm's memory (crashing it) when loading layeredfs mods, please contact `SciresM#0524`.
+ I am jinxing myself by saying this, but it's really hard to imagine any game being worse than The Legend of Zelda: Tears of the Kingdom, but if it happens again I will drop everything to fix it as usual.
+ General system stability improvements to enhance the user's experience.
## 1.5.3
+ Support was added for 16.0.3.
+ Atmosphère was updated to use GCC 13/newlib (latest devkitA64/devkitARM releases).
+ **Please note**: This introduces a known issue, which is currently being worked on.
+ As you may recall from the 1.4.1 changelog, Fire Emblem: Engage requires enormous amounts of memory to support using layeredfs mods with the game.
+ Latest GCC/newlib slightly increases malloc overhead size, which makes the previous memory increase insufficient.
+ A general-case solution to this is in the works, which should hopefully fix the problem in a way that doesn't jinx me for the future.
+ A number of minor issues were fixed and improvements were made, including:
+ An issue was fixed that caused system font replacement to not work on 16.0.0+.
+ A minor accuracy issue was addressed in mesosphere's management of certain memory ranges; this issue would have had zero visible impact to the end-user.
+ General system stability improvements to enhance the user's experience.
## 1.5.2
+ A homebrew application (`haze`) was added for performing USB file transfer (with thanks to @liamwhite for both design and implementation).
+ `haze` is included with atmosphère, and provides access to the SD card via the PTP/MTP protocol.
+ **Please note**: haze will show inside the homebrew menu under the name "USB File Transfer".
+ **Please note**: Atmosphère cannot be updated at runtime, and trying to install an atmosphère update via haze will fail as usual.
+ General system stability improvements to enhance the user's experience.
## 1.5.1
+ `fatal` was updated to reduce memory footprint.
+ Starting in 16.0.0, official `fatal` has no framebuffer or rendering logic, and instead calls other system service commands to draw the screen.
@ -34,6 +120,7 @@
+ The additional memory here is taken from the applet pool; no issues are expected to arise from this, but please report anything you may run into.
+ As usual, if you encounter a game that exhausts ams.mitm's memory (crashing it) when loading layeredfs mods, please contact `SciresM#0524`.
+ I am jinxing myself by saying this, but it's really hard to imagine any game being worse than Fire Emblem: Engage, but if it happens again I will drop everything to fix it as usual.
+ General system stability improvements to enhance the user's experience.
## 1.4.0
+ Support was added for 15.0.0.
+ `mesosphère` was updated to reflect the latest official kernel behavior.

18
docs/faq.md Normal file
View file

@ -0,0 +1,18 @@
# Frequently Asked Questions
This document serves as a place to store answers for common questions received about Atmosphère.
## What does "June 15th" mean?
When Atmosphère began development in February 2018, "June 15" was given as the estimate/target date for a first release, to coincide with the planned disclosure of a vulnerability.
This deadline was missed, hard.
People made rather a lot of fun of me (SciresM) for this.
Several months later, when the first Atmosphère release occurred, I captioned it "Happy June 15th!" and pretended like I hadn't missed the first deadline.
This amused me a lot, and so the practice has been kept up for every single release since.
Depending on who you ask, you may be told that this is a dumb joke and it is not funny.
This is incorrect. It is definitely a dumb joke, but it is also hilarious.

View file

@ -27,3 +27,6 @@ A list of planned features for Atmosphère can be found [here](roadmap.md).
## Release History
A changelog of previous versions of Atmosphère can be found [here](changelog.md).
## Frequently Asked Questions
Answers to one or more frequently asked questions may be found [here](faq.md).

4
emummc/.gitrepo vendored
View file

@ -6,7 +6,7 @@
[subrepo]
remote = https://github.com/m4xw/emummc
branch = develop
commit = bba1f1fb65f75721caf6d023a35d75d3c9aafd0f
parent = 1ab8b234447864503e21c600681219a96962e6c1
commit = 832b2442685b45b086697ffe09c5fde05d7444e9
parent = 3ccb0ae02bc06769f44d61233bf177301ba9d5f3
method = merge
cmdver = 0.4.1

2
emummc/README.md vendored
View file

@ -2,7 +2,7 @@
*A SDMMC driver replacement for Nintendo's Filesystem Services, by **m4xw***
### Supported Horizon Versions
**1.0.0 - 16.0.0**
**1.0.0 - 18.0.0**
## Features
* Arbitrary SDMMC backend selection

View file

@ -65,6 +65,12 @@
#include "offsets/1500_exfat.h"
#include "offsets/1600.h"
#include "offsets/1600_exfat.h"
#include "offsets/1603.h"
#include "offsets/1603_exfat.h"
#include "offsets/1700.h"
#include "offsets/1700_exfat.h"
#include "offsets/1800.h"
#include "offsets/1800_exfat.h"
#include "../utils/fatal.h"
#define GET_OFFSET_STRUCT_NAME(vers) g_offsets##vers
@ -141,6 +147,12 @@ DEFINE_OFFSET_STRUCT(_1500);
DEFINE_OFFSET_STRUCT(_1500_EXFAT);
DEFINE_OFFSET_STRUCT(_1600);
DEFINE_OFFSET_STRUCT(_1600_EXFAT);
DEFINE_OFFSET_STRUCT(_1603);
DEFINE_OFFSET_STRUCT(_1603_EXFAT);
DEFINE_OFFSET_STRUCT(_1700);
DEFINE_OFFSET_STRUCT(_1700_EXFAT);
DEFINE_OFFSET_STRUCT(_1800);
DEFINE_OFFSET_STRUCT(_1800_EXFAT);
const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
switch (version) {
@ -242,6 +254,18 @@ const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
return &(GET_OFFSET_STRUCT_NAME(_1600));
case FS_VER_16_0_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1600_EXFAT));
case FS_VER_16_0_3:
return &(GET_OFFSET_STRUCT_NAME(_1603));
case FS_VER_16_0_3_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1603_EXFAT));
case FS_VER_17_0_0:
return &(GET_OFFSET_STRUCT_NAME(_1700));
case FS_VER_17_0_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1700_EXFAT));
case FS_VER_18_0_0:
return &(GET_OFFSET_STRUCT_NAME(_1800));
case FS_VER_18_0_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1800_EXFAT));
default:
fatal_abort(Fatal_UnknownVersion);
}

View file

@ -95,6 +95,15 @@ enum FS_VER
FS_VER_16_0_0,
FS_VER_16_0_0_EXFAT,
FS_VER_16_0_3,
FS_VER_16_0_3_EXFAT,
FS_VER_17_0_0,
FS_VER_17_0_0_EXFAT,
FS_VER_18_0_0,
FS_VER_18_0_0_EXFAT,
FS_VER_MAX,
};

59
emummc/source/FS/offsets/1603.h vendored Normal file
View file

@ -0,0 +1,59 @@
/*
 * Copyright (c) 2019 m4xw <m4x@m4xw.net>
 * Copyright (c) 2019 Atmosphere-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __FS_1603_H__
#define __FS_1603_H__
/*
 * Offsets into the FS sysmodule binary for firmware 16.0.3 (FAT32 build).
 * NOTE(review): these appear to be reverse-engineered, build-specific
 * code/data offsets; they must be regenerated for every new FS release
 * (compare the sibling per-version headers in this directory).
 */
// Accessor vtable getters
#define FS_OFFSET_1603_SDMMC_ACCESSOR_GC 0x1862F0
#define FS_OFFSET_1603_SDMMC_ACCESSOR_SD 0x187F70
#define FS_OFFSET_1603_SDMMC_ACCESSOR_NAND 0x1867B0
// Hooks
#define FS_OFFSET_1603_SDMMC_WRAPPER_READ 0x182240
#define FS_OFFSET_1603_SDMMC_WRAPPER_WRITE 0x1822A0
#define FS_OFFSET_1603_RTLD 0x269B0
// Negative displacement applied relative to the RTLD offset above.
#define FS_OFFSET_1603_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x3C)))
#define FS_OFFSET_1603_CLKRST_SET_MIN_V_CLK_RATE 0x1A2D80
// Misc funcs
#define FS_OFFSET_1603_LOCK_MUTEX 0x17B780
#define FS_OFFSET_1603_UNLOCK_MUTEX 0x17B7D0
#define FS_OFFSET_1603_SDMMC_WRAPPER_CONTROLLER_OPEN 0x182200
#define FS_OFFSET_1603_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x182220
// Misc Data
#define FS_OFFSET_1603_SD_MUTEX 0xFFB3F0
#define FS_OFFSET_1603_NAND_MUTEX 0xFF6B58
#define FS_OFFSET_1603_ACTIVE_PARTITION 0xFF6B98
#define FS_OFFSET_1603_SDMMC_DAS_HANDLE 0xFDC8B0
// NOPs
#define FS_OFFSET_1603_SD_DAS_INIT 0x258D4
// Nintendo Paths
#define FS_OFFSET_1603_NINTENDO_PATHS \
{ \
    {.opcode_reg = 3, .adrp_offset = 0x00063B98, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 3, .adrp_offset = 0x00070DBC, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0007795C, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0008A7A4, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1603_H__

59
emummc/source/FS/offsets/1603_exfat.h vendored Normal file
View file

@ -0,0 +1,59 @@
/*
 * Copyright (c) 2019 m4xw <m4x@m4xw.net>
 * Copyright (c) 2019 Atmosphere-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __FS_1603_EXFAT_H__
#define __FS_1603_EXFAT_H__
/*
 * Offsets into the FS sysmodule binary for firmware 16.0.3 (exFAT build).
 * NOTE(review): these appear to be reverse-engineered, build-specific
 * code/data offsets; they must be regenerated for every new FS release
 * (compare the sibling per-version headers in this directory).
 */
// Accessor vtable getters
#define FS_OFFSET_1603_EXFAT_SDMMC_ACCESSOR_GC 0x190FD0
#define FS_OFFSET_1603_EXFAT_SDMMC_ACCESSOR_SD 0x192C50
#define FS_OFFSET_1603_EXFAT_SDMMC_ACCESSOR_NAND 0x191490
// Hooks
#define FS_OFFSET_1603_EXFAT_SDMMC_WRAPPER_READ 0x18CF20
#define FS_OFFSET_1603_EXFAT_SDMMC_WRAPPER_WRITE 0x18CF80
#define FS_OFFSET_1603_EXFAT_RTLD 0x269B0
// Negative displacement applied relative to the RTLD offset above.
#define FS_OFFSET_1603_EXFAT_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x3C)))
#define FS_OFFSET_1603_EXFAT_CLKRST_SET_MIN_V_CLK_RATE 0x1ADA60
// Misc funcs
#define FS_OFFSET_1603_EXFAT_LOCK_MUTEX 0x186460
#define FS_OFFSET_1603_EXFAT_UNLOCK_MUTEX 0x1864B0
#define FS_OFFSET_1603_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x18CEE0
#define FS_OFFSET_1603_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x18CF00
// Misc Data
#define FS_OFFSET_1603_EXFAT_SD_MUTEX 0x100D3F0
#define FS_OFFSET_1603_EXFAT_NAND_MUTEX 0x1008B58
#define FS_OFFSET_1603_EXFAT_ACTIVE_PARTITION 0x1008B98
#define FS_OFFSET_1603_EXFAT_SDMMC_DAS_HANDLE 0xFE98B0
// NOPs
#define FS_OFFSET_1603_EXFAT_SD_DAS_INIT 0x258D4
// Nintendo Paths
#define FS_OFFSET_1603_EXFAT_NINTENDO_PATHS \
{ \
    {.opcode_reg = 3, .adrp_offset = 0x00063B98, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 3, .adrp_offset = 0x00070DBC, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0007795C, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0008A7A4, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1603_EXFAT_H__

59
emummc/source/FS/offsets/1700.h vendored Normal file
View file

@ -0,0 +1,59 @@
/*
 * Copyright (c) 2019 m4xw <m4x@m4xw.net>
 * Copyright (c) 2019 Atmosphere-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __FS_1700_H__
#define __FS_1700_H__
/*
 * Offsets into the FS sysmodule binary for firmware 17.0.0 (FAT32 build).
 * NOTE(review): these appear to be reverse-engineered, build-specific
 * code/data offsets; they must be regenerated for every new FS release
 * (compare the sibling per-version headers in this directory).
 */
// Accessor vtable getters
#define FS_OFFSET_1700_SDMMC_ACCESSOR_GC 0x18AD00
#define FS_OFFSET_1700_SDMMC_ACCESSOR_SD 0x18C9D0
#define FS_OFFSET_1700_SDMMC_ACCESSOR_NAND 0x18B1D0
// Hooks
#define FS_OFFSET_1700_SDMMC_WRAPPER_READ 0x186BC0
#define FS_OFFSET_1700_SDMMC_WRAPPER_WRITE 0x186C20
#define FS_OFFSET_1700_RTLD 0x29D10
// Negative displacement applied relative to the RTLD offset above.
#define FS_OFFSET_1700_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x3C)))
#define FS_OFFSET_1700_CLKRST_SET_MIN_V_CLK_RATE 0x1A7B60
// Misc funcs
#define FS_OFFSET_1700_LOCK_MUTEX 0x17FEA0
#define FS_OFFSET_1700_UNLOCK_MUTEX 0x17FEF0
#define FS_OFFSET_1700_SDMMC_WRAPPER_CONTROLLER_OPEN 0x186B80
#define FS_OFFSET_1700_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x186BA0
// Misc Data
#define FS_OFFSET_1700_SD_MUTEX 0xFCE3F0
#define FS_OFFSET_1700_NAND_MUTEX 0xFC9B78
#define FS_OFFSET_1700_ACTIVE_PARTITION 0xFC9BB8
#define FS_OFFSET_1700_SDMMC_DAS_HANDLE 0xFAF840
// NOPs
#define FS_OFFSET_1700_SD_DAS_INIT 0x28C64
// Nintendo Paths
#define FS_OFFSET_1700_NINTENDO_PATHS \
{ \
    {.opcode_reg = 3, .adrp_offset = 0x00068068, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 3, .adrp_offset = 0x0007510C, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0007BEAC, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0008F674, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1700_H__

59
emummc/source/FS/offsets/1700_exfat.h vendored Normal file
View file

@ -0,0 +1,59 @@
/*
 * Copyright (c) 2019 m4xw <m4x@m4xw.net>
 * Copyright (c) 2019 Atmosphere-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __FS_1700_EXFAT_H__
#define __FS_1700_EXFAT_H__
/*
 * Offsets into the FS sysmodule binary for firmware 17.0.0 (exFAT build).
 * NOTE(review): these appear to be reverse-engineered, build-specific
 * code/data offsets; they must be regenerated for every new FS release
 * (compare the sibling per-version headers in this directory).
 */
// Accessor vtable getters
#define FS_OFFSET_1700_EXFAT_SDMMC_ACCESSOR_GC 0x195B60
#define FS_OFFSET_1700_EXFAT_SDMMC_ACCESSOR_SD 0x197830
#define FS_OFFSET_1700_EXFAT_SDMMC_ACCESSOR_NAND 0x196030
// Hooks
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_READ 0x191A20
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_WRITE 0x191A80
#define FS_OFFSET_1700_EXFAT_RTLD 0x29D10
// Negative displacement applied relative to the RTLD offset above.
#define FS_OFFSET_1700_EXFAT_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x3C)))
#define FS_OFFSET_1700_EXFAT_CLKRST_SET_MIN_V_CLK_RATE 0x1B29C0
// Misc funcs
#define FS_OFFSET_1700_EXFAT_LOCK_MUTEX 0x18AD00
#define FS_OFFSET_1700_EXFAT_UNLOCK_MUTEX 0x18AD50
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x1919E0
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x191A00
// Misc Data
#define FS_OFFSET_1700_EXFAT_SD_MUTEX 0xFE03F0
#define FS_OFFSET_1700_EXFAT_NAND_MUTEX 0xFDBB78
#define FS_OFFSET_1700_EXFAT_ACTIVE_PARTITION 0xFDBBB8
#define FS_OFFSET_1700_EXFAT_SDMMC_DAS_HANDLE 0xFBC840
// NOPs
#define FS_OFFSET_1700_EXFAT_SD_DAS_INIT 0x28C64
// Nintendo Paths
#define FS_OFFSET_1700_EXFAT_NINTENDO_PATHS \
{ \
    {.opcode_reg = 3, .adrp_offset = 0x00068068, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 3, .adrp_offset = 0x0007510C, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0007BEAC, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0008F674, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1700_EXFAT_H__

59
emummc/source/FS/offsets/1800.h vendored Normal file
View file

@ -0,0 +1,59 @@
/*
 * Copyright (c) 2019 m4xw <m4x@m4xw.net>
 * Copyright (c) 2019 Atmosphere-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __FS_1800_H__
#define __FS_1800_H__
/*
 * Offsets into the FS sysmodule binary for firmware 18.0.0 (FAT32 build).
 * NOTE(review): these appear to be reverse-engineered, build-specific
 * code/data offsets; they must be regenerated for every new FS release
 * (compare the sibling per-version headers in this directory).
 */
// Accessor vtable getters
#define FS_OFFSET_1800_SDMMC_ACCESSOR_GC 0x18AB00
#define FS_OFFSET_1800_SDMMC_ACCESSOR_SD 0x18C800
#define FS_OFFSET_1800_SDMMC_ACCESSOR_NAND 0x18AFE0
// Hooks
#define FS_OFFSET_1800_SDMMC_WRAPPER_READ 0x186A50
#define FS_OFFSET_1800_SDMMC_WRAPPER_WRITE 0x186AB0
#define FS_OFFSET_1800_RTLD 0x2A3A4
// Negative displacement applied relative to the RTLD offset above
// (note: -0x44 here, where pre-18.0.0 builds used -0x3C).
#define FS_OFFSET_1800_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x44)))
#define FS_OFFSET_1800_CLKRST_SET_MIN_V_CLK_RATE 0x1A77D0
// Misc funcs
#define FS_OFFSET_1800_LOCK_MUTEX 0x17FCC0
#define FS_OFFSET_1800_UNLOCK_MUTEX 0x17FD10
#define FS_OFFSET_1800_SDMMC_WRAPPER_CONTROLLER_OPEN 0x186A10
#define FS_OFFSET_1800_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x186A30
// Misc Data
#define FS_OFFSET_1800_SD_MUTEX 0xFD13F0
#define FS_OFFSET_1800_NAND_MUTEX 0xFCCB28
#define FS_OFFSET_1800_ACTIVE_PARTITION 0xFCCB68
#define FS_OFFSET_1800_SDMMC_DAS_HANDLE 0xFB1950
// NOPs
#define FS_OFFSET_1800_SD_DAS_INIT 0x28F24
// Nintendo Paths
#define FS_OFFSET_1800_NINTENDO_PATHS \
{ \
    {.opcode_reg = 3, .adrp_offset = 0x00068B08, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 3, .adrp_offset = 0x000758DC, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0007C77C, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x000905C4, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1800_H__

59
emummc/source/FS/offsets/1800_exfat.h vendored Normal file
View file

@ -0,0 +1,59 @@
/*
 * Copyright (c) 2019 m4xw <m4x@m4xw.net>
 * Copyright (c) 2019 Atmosphere-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __FS_1800_EXFAT_H__
#define __FS_1800_EXFAT_H__
/*
 * Offsets into the FS sysmodule binary for firmware 18.0.0 (exFAT build).
 * NOTE(review): these appear to be reverse-engineered, build-specific
 * code/data offsets; they must be regenerated for every new FS release
 * (compare the sibling per-version headers in this directory).
 */
// Accessor vtable getters
#define FS_OFFSET_1800_EXFAT_SDMMC_ACCESSOR_GC 0x195B90
#define FS_OFFSET_1800_EXFAT_SDMMC_ACCESSOR_SD 0x197890
#define FS_OFFSET_1800_EXFAT_SDMMC_ACCESSOR_NAND 0x196070
// Hooks
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_READ 0x191AE0
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_WRITE 0x191B40
#define FS_OFFSET_1800_EXFAT_RTLD 0x2A3A4
// Negative displacement applied relative to the RTLD offset above
// (note: -0x44 here, where pre-18.0.0 builds used -0x3C).
#define FS_OFFSET_1800_EXFAT_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x44)))
#define FS_OFFSET_1800_EXFAT_CLKRST_SET_MIN_V_CLK_RATE 0x1B2860
// Misc funcs
#define FS_OFFSET_1800_EXFAT_LOCK_MUTEX 0x18AD50
#define FS_OFFSET_1800_EXFAT_UNLOCK_MUTEX 0x18ADA0
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x191AA0
#define FS_OFFSET_1800_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x191AC0
// Misc Data
#define FS_OFFSET_1800_EXFAT_SD_MUTEX 0xFE33F0
#define FS_OFFSET_1800_EXFAT_NAND_MUTEX 0xFDEB28
#define FS_OFFSET_1800_EXFAT_ACTIVE_PARTITION 0xFDEB68
#define FS_OFFSET_1800_EXFAT_SDMMC_DAS_HANDLE 0xFBE950
// NOPs
#define FS_OFFSET_1800_EXFAT_SD_DAS_INIT 0x28F24
// Nintendo Paths
#define FS_OFFSET_1800_EXFAT_NINTENDO_PATHS \
{ \
    {.opcode_reg = 3, .adrp_offset = 0x00068B08, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 3, .adrp_offset = 0x000758DC, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x0007C77C, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 4, .adrp_offset = 0x000905C4, .add_rel_offset = 0x00000004}, \
    {.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1800_EXFAT_H__

View file

@ -85,10 +85,10 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
/* We can get away with only including latest because exosphere supports newer-than-expected master key in engine. */
/* TODO: Update on next change of keys. */
/* Mariko Development Master Kek Source. */
.byte 0x3A, 0x9C, 0xF0, 0x39, 0x70, 0x23, 0xF6, 0xAF, 0x71, 0x44, 0x60, 0xF4, 0x6D, 0xED, 0xA1, 0xD6
.byte 0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED
/* Mariko Production Master Kek Source. */
.byte 0xA5, 0xEC, 0x16, 0x39, 0x1A, 0x30, 0x16, 0x08, 0x2E, 0xCF, 0x09, 0x6F, 0x5E, 0x7C, 0xEE, 0xA9
.byte 0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2
/* Development Master Key Vectors. */
.byte 0x46, 0x22, 0xB4, 0x51, 0x9A, 0x7E, 0xA7, 0x7F, 0x62, 0xA1, 0x1F, 0x8F, 0xC5, 0x3A, 0xDB, 0xFE /* Zeroes encrypted with Master Key 00. */
@ -107,6 +107,8 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x08, 0xE0, 0xF4, 0xBE, 0xAA, 0x6E, 0x5A, 0xC3, 0xA6, 0xBC, 0xFE, 0xB9, 0xE2, 0xA3, 0x24, 0x12 /* Master key 0C encrypted with Master key 0D. */
.byte 0xD6, 0x80, 0x98, 0xC0, 0xFA, 0xC7, 0x13, 0xCB, 0x93, 0xD2, 0x0B, 0x82, 0x4C, 0xA1, 0x7B, 0x8D /* Master key 0D encrypted with Master key 0E. */
.byte 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 /* Master key 0E encrypted with Master key 0F. */
.byte 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 /* Master key 0F encrypted with Master key 10. */
.byte 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 /* Master key 10 encrypted with Master key 11. */
/* Production Master Key Vectors. */
.byte 0x0C, 0xF0, 0x59, 0xAC, 0x85, 0xF6, 0x26, 0x65, 0xE1, 0xE9, 0x19, 0x55, 0xE6, 0xF2, 0x67, 0x3D /* Zeroes encrypted with Master Key 00. */
@ -125,6 +127,8 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x83, 0x67, 0xAF, 0x01, 0xCF, 0x93, 0xA1, 0xAB, 0x80, 0x45, 0xF7, 0x3F, 0x72, 0xFD, 0x3B, 0x38 /* Master key 0C encrypted with Master key 0D. */
.byte 0xB1, 0x81, 0xA6, 0x0D, 0x72, 0xC7, 0xEE, 0x15, 0x21, 0xF3, 0xC0, 0xB5, 0x6B, 0x61, 0x6D, 0xE7 /* Master key 0D encrypted with Master key 0E. */
.byte 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 /* Master key 0E encrypted with Master key 0F. */
.byte 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD /* Master key 0F encrypted with Master key 10. */
.byte 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 /* Master key 10 encrypted with Master key 11. */
/* Device Master Key Source Sources. */
.byte 0x8B, 0x4E, 0x1C, 0x22, 0x42, 0x07, 0xC8, 0x73, 0x56, 0x94, 0x08, 0x8B, 0xCC, 0x47, 0x0F, 0x5D /* 4.0.0 Device Master Key Source Source. */
@ -140,6 +144,8 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x5B, 0x94, 0x63, 0xF7, 0xAD, 0x96, 0x1B, 0xA6, 0x23, 0x30, 0x06, 0x4D, 0x01, 0xE4, 0xCE, 0x1D /* 14.0.0 Device Master Key Source Source. */
.byte 0x5E, 0xC9, 0xC5, 0x0A, 0xD0, 0x5F, 0x8B, 0x7B, 0xA7, 0x39, 0xEA, 0xBC, 0x60, 0x0F, 0x74, 0xE6 /* 15.0.0 Device Master Key Source Source. */
.byte 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C /* 16.0.0 Device Master Key Source Source. */
.byte 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 /* 17.0.0 Device Master Key Source Source. */
.byte 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 /* 18.0.0 Device Master Key Source Source. */
/* Development Device Master Kek Sources. */
.byte 0xD6, 0xBD, 0x9F, 0xC6, 0x18, 0x09, 0xE1, 0x96, 0x20, 0x39, 0x60, 0xD2, 0x89, 0x83, 0x31, 0x34 /* 4.0.0 Device Master Kek Source. */
@ -155,6 +161,8 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xCE, 0x14, 0x74, 0x66, 0x98, 0xA8, 0x6D, 0x7D, 0xBD, 0x54, 0x91, 0x68, 0x5F, 0x1D, 0x0E, 0xEA /* 14.0.0 Device Master Kek Source. */
.byte 0xAE, 0x05, 0x48, 0x65, 0xAB, 0x17, 0x9D, 0x3D, 0x51, 0xB7, 0x56, 0xBD, 0x9B, 0x0B, 0x5B, 0x6E /* 15.0.0 Device Master Kek Source. */
.byte 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F /* 16.0.0 Device Master Kek Source. */
.byte 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 /* 17.0.0 Device Master Kek Source. */
.byte 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF /* 18.0.0 Device Master Kek Source. */
/* Production Device Master Kek Sources. */
.byte 0x88, 0x62, 0x34, 0x6E, 0xFA, 0xF7, 0xD8, 0x3F, 0xE1, 0x30, 0x39, 0x50, 0xF0, 0xB7, 0x5D, 0x5D /* 4.0.0 Device Master Kek Source. */
@ -170,3 +178,5 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x67, 0xD5, 0xD6, 0x0C, 0x08, 0xF5, 0xA3, 0x11, 0xBD, 0x6D, 0x5A, 0xEB, 0x96, 0x24, 0xB0, 0xD2 /* 14.0.0 Device Master Kek Source. */
.byte 0x7C, 0x30, 0xED, 0x8B, 0x39, 0x25, 0x2C, 0x08, 0x8F, 0x48, 0xDC, 0x28, 0xE6, 0x1A, 0x6B, 0x49 /* 15.0.0 Device Master Kek Source. */
.byte 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F /* 16.0.0 Device Master Kek Source. */
.byte 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E /* 17.0.0 Device Master Kek Source. */
.byte 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B /* 18.0.0 Device Master Kek Source. */

View file

@ -94,7 +94,7 @@ namespace ams::secmon::boot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 16);
static_assert(pkg1::KeyGeneration_Count == 18);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}

View file

@ -70,6 +70,15 @@ namespace ams::secmon {
}
void PerformUserRebootByPmic() {
/* Ensure that i2c-5 is usable for communicating with the pmic. */
clkrst::EnableI2c5Clock();
i2c::Initialize(i2c::Port_5);
/* Reboot. */
pmic::ShutdownSystem(true);
}
void PerformUserRebootToRcm() {
/* Configure the bootrom to boot to rcm. */
reg::Write(PMC + APBDEV_PMC_SCRATCH0, 0x2);
@ -100,11 +109,20 @@ namespace ams::secmon {
}
void PerformUserShutDown() {
/* Load our reboot stub to iram. */
LoadRebootStub(RebootStubAction_ShutDown);
if (fuse::GetSocType() == fuse::SocType_Mariko) {
/* Ensure that i2c-5 is usable for communicating with the pmic. */
clkrst::EnableI2c5Clock();
i2c::Initialize(i2c::Port_5);
/* Reboot. */
PerformPmcReboot();
/* On Mariko shutdown via pmic. */
pmic::ShutdownSystem(false);
} else /* if (fuse::GetSocType() == fuse::SocType_Erista) */ {
/* Load our reboot stub to iram. */
LoadRebootStub(RebootStubAction_ShutDown);
/* Reboot. */
PerformPmcReboot();
}
}
}

View file

@ -23,11 +23,13 @@ namespace ams::secmon {
UserRebootType_ToRcm = 1,
UserRebootType_ToPayload = 2,
UserRebootType_ToFatalError = 3,
UserRebootType_ByPmic = 4,
};
void PerformUserRebootToRcm();
void PerformUserRebootToPayload();
void PerformUserRebootToFatalError();
void PerformUserRebootByPmic();
void PerformUserShutDown();
}

View file

@ -161,6 +161,7 @@ namespace ams::secmon::smc {
constexpr const u8 EsCommonKeySources[EsCommonKeyType_Count][AesKeySize] = {
[EsCommonKeyType_TitleKey] = { 0x1E, 0xDC, 0x7B, 0x3B, 0x60, 0xE6, 0xB4, 0xD8, 0x78, 0xB8, 0x17, 0x15, 0x98, 0x5E, 0x62, 0x9B },
[EsCommonKeyType_ArchiveKey] = { 0x3B, 0x78, 0xF2, 0x61, 0x0F, 0x9D, 0x5A, 0xE2, 0x7B, 0x4E, 0x45, 0xAF, 0xCB, 0x0B, 0x67, 0x4D },
[EsCommonKeyType_Unknown2] = { 0x42, 0x64, 0x0B, 0xE3, 0x5F, 0xC6, 0xBE, 0x47, 0xC7, 0xB4, 0x84, 0xC5, 0xEB, 0x63, 0xAA, 0x02 },
};
constexpr const u8 EsSealKeySource[AesKeySize] = {

View file

@ -22,6 +22,7 @@ namespace ams::secmon::smc {
enum EsCommonKeyType {
EsCommonKeyType_TitleKey = 0,
EsCommonKeyType_ArchiveKey = 1,
EsCommonKeyType_Unknown2 = 2,
EsCommonKeyType_Count,
};

View file

@ -357,6 +357,9 @@ namespace ams::secmon::smc {
case UserRebootType_ToFatalError:
PerformUserRebootToFatalError();
break;
case UserRebootType_ByPmic:
PerformUserRebootByPmic();
break;
default:
return SmcResult::InvalidArgument;
}
@ -365,18 +368,17 @@ namespace ams::secmon::smc {
case UserRebootType_ToFatalError:
PerformUserRebootToFatalError();
break;
case UserRebootType_ByPmic:
PerformUserRebootByPmic();
break;
default:
return SmcResult::InvalidArgument;
}
}
break;
case ConfigItem::ExosphereNeedsShutdown:
if (soc_type == fuse::SocType_Erista) {
if (args.r[3] != 0) {
PerformUserShutDown();
}
} else /* if (soc_type == fuse::SocType_Mariko) */ {
return SmcResult::NotSupported;
if (args.r[3] != 0) {
PerformUserShutDown();
}
break;
case ConfigItem::ExospherePayloadAddress:

View file

@ -32,8 +32,8 @@ namespace ams::secmon::smc {
struct PrepareEsDeviceUniqueKeyOption {
using KeyGeneration = util::BitPack32::Field<0, 6, int>;
using Type = util::BitPack32::Field<6, 1, EsCommonKeyType>;
using Reserved = util::BitPack32::Field<7, 25, u32>;
using Type = util::BitPack32::Field<6, 2, EsCommonKeyType>;
using Reserved = util::BitPack32::Field<8, 24, u32>;
};
constexpr const u8 ModularExponentiateByStorageKeyTable[] = {

View file

@ -96,7 +96,7 @@ namespace ams::nxboot::loader {
}
void Uncompress(void *dst, size_t dst_size, const void *src, size_t src_size) {
/* Create an execute a decompressor. */
/* Create and execute a decompressor. */
Lz4Uncompressor(dst, dst_size, src, src_size).Uncompress();
}

View file

@ -23,17 +23,17 @@ namespace ams::nxboot {
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0xA5, 0xEC, 0x16, 0x39, 0x1A, 0x30, 0x16, 0x08, 0x2E, 0xCF, 0x09, 0x6F, 0x5E, 0x7C, 0xEE, 0xA9
0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2
};
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSourceDev[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x3A, 0x9C, 0xF0, 0x39, 0x70, 0x23, 0xF6, 0xAF, 0x71, 0x44, 0x60, 0xF4, 0x6D, 0xED, 0xA1, 0xD6
0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED
};
alignas(se::AesBlockSize) constexpr inline const u8 EristaMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x99, 0x22, 0x09, 0x57, 0xA7, 0xF9, 0x5E, 0x94, 0xFE, 0x78, 0x7F, 0x41, 0xD6, 0xE7, 0x56, 0xE6
0x00, 0x04, 0x5D, 0xF0, 0x4D, 0xCD, 0x14, 0xA3, 0x1C, 0xBF, 0xDE, 0x48, 0x55, 0xBA, 0x35, 0xC1
};
alignas(se::AesBlockSize) constexpr inline const u8 KeyblobKeySource[se::AesBlockSize] = {
@ -70,6 +70,8 @@ namespace ams::nxboot {
{ 0x5B, 0x94, 0x63, 0xF7, 0xAD, 0x96, 0x1B, 0xA6, 0x23, 0x30, 0x06, 0x4D, 0x01, 0xE4, 0xCE, 0x1D }, /* 14.0.0 Device Master Key Source Source. */
{ 0x5E, 0xC9, 0xC5, 0x0A, 0xD0, 0x5F, 0x8B, 0x7B, 0xA7, 0x39, 0xEA, 0xBC, 0x60, 0x0F, 0x74, 0xE6 }, /* 15.0.0 Device Master Key Source Source. */
{ 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C }, /* 16.0.0 Device Master Key Source Source. */
{ 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 }, /* 17.0.0 Device Master Key Source Source. */
{ 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 }, /* 18.0.0 Device Master Key Source Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSources[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@ -86,6 +88,8 @@ namespace ams::nxboot {
{ 0x67, 0xD5, 0xD6, 0x0C, 0x08, 0xF5, 0xA3, 0x11, 0xBD, 0x6D, 0x5A, 0xEB, 0x96, 0x24, 0xB0, 0xD2 }, /* 14.0.0 Device Master Kek Source. */
{ 0x7C, 0x30, 0xED, 0x8B, 0x39, 0x25, 0x2C, 0x08, 0x8F, 0x48, 0xDC, 0x28, 0xE6, 0x1A, 0x6B, 0x49 }, /* 15.0.0 Device Master Kek Source. */
{ 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F }, /* 16.0.0 Device Master Kek Source. */
{ 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E }, /* 17.0.0 Device Master Kek Source. */
{ 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B }, /* 18.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSourcesDev[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@ -102,6 +106,8 @@ namespace ams::nxboot {
{ 0xCE, 0x14, 0x74, 0x66, 0x98, 0xA8, 0x6D, 0x7D, 0xBD, 0x54, 0x91, 0x68, 0x5F, 0x1D, 0x0E, 0xEA }, /* 14.0.0 Device Master Kek Source. */
{ 0xAE, 0x05, 0x48, 0x65, 0xAB, 0x17, 0x9D, 0x3D, 0x51, 0xB7, 0x56, 0xBD, 0x9B, 0x0B, 0x5B, 0x6E }, /* 15.0.0 Device Master Kek Source. */
{ 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F }, /* 16.0.0 Device Master Kek Source. */
{ 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 }, /* 17.0.0 Device Master Kek Source. */
{ 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF }, /* 18.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySources[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@ -121,6 +127,8 @@ namespace ams::nxboot {
{ 0x83, 0x67, 0xAF, 0x01, 0xCF, 0x93, 0xA1, 0xAB, 0x80, 0x45, 0xF7, 0x3F, 0x72, 0xFD, 0x3B, 0x38 }, /* Master key 0C encrypted with Master key 0D. */
{ 0xB1, 0x81, 0xA6, 0x0D, 0x72, 0xC7, 0xEE, 0x15, 0x21, 0xF3, 0xC0, 0xB5, 0x6B, 0x61, 0x6D, 0xE7 }, /* Master key 0D encrypted with Master key 0E. */
{ 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD }, /* Master key 0F encrypted with Master key 10. */
{ 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 }, /* Master key 10 encrypted with Master key 11. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySourcesDev[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@ -140,6 +148,8 @@ namespace ams::nxboot {
{ 0x08, 0xE0, 0xF4, 0xBE, 0xAA, 0x6E, 0x5A, 0xC3, 0xA6, 0xBC, 0xFE, 0xB9, 0xE2, 0xA3, 0x24, 0x12 }, /* Master key 0C encrypted with Master key 0D. */
{ 0xD6, 0x80, 0x98, 0xC0, 0xFA, 0xC7, 0x13, 0xCB, 0x93, 0xD2, 0x0B, 0x82, 0x4C, 0xA1, 0x7B, 0x8D }, /* Master key 0D encrypted with Master key 0E. */
{ 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 }, /* Master key 0F encrypted with Master key 10. */
{ 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 }, /* Master key 10 encrypted with Master key 11. */
};
alignas(se::AesBlockSize) constinit u8 MasterKeys[pkg1::OldMasterKeyCount][se::AesBlockSize] = {};

View file

@ -80,7 +80,7 @@ namespace ams::nxboot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 16);
static_assert(pkg1::KeyGeneration_Count == 18);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}

View file

@ -257,6 +257,10 @@ namespace ams::nxboot {
return ams::TargetFirmware_15_0_0;
} else if (std::memcmp(package1 + 0x10, "20230111", 8) == 0) {
return ams::TargetFirmware_16_0_0;
} else if (std::memcmp(package1 + 0x10, "20230906", 8) == 0) {
return ams::TargetFirmware_17_0_0;
} else if (std::memcmp(package1 + 0x10, "20240207", 8) == 0) {
return ams::TargetFirmware_18_0_0;
}
break;
default:

View file

@ -24,6 +24,9 @@ namespace ams::nxboot {
namespace {
constexpr u32 MesoshereMetadataLayout0Magic = util::FourCC<'M','S','S','0'>::Code;
constexpr u32 MesoshereMetadataLayout1Magic = util::FourCC<'M','S','S','1'>::Code;
struct InitialProcessBinaryHeader {
static constexpr u32 Magic = util::FourCC<'I','N','I','1'>::Code;
@ -162,6 +165,15 @@ namespace ams::nxboot {
FsVersion_16_0_0,
FsVersion_16_0_0_Exfat,
FsVersion_16_0_3,
FsVersion_16_0_3_Exfat,
FsVersion_17_0_0,
FsVersion_17_0_0_Exfat,
FsVersion_18_0_0,
FsVersion_18_0_0_Exfat,
FsVersion_Count,
};
@ -239,10 +251,30 @@ namespace ams::nxboot {
{ 0x56, 0xE8, 0x56, 0x56, 0x6C, 0x38, 0xD8, 0xBE }, /* FsVersion_16_0_0 */
{ 0xCF, 0xAB, 0x45, 0x0C, 0x2C, 0x53, 0x9D, 0xA9 }, /* FsVersion_16_0_0_Exfat */
{ 0x39, 0xEE, 0x1F, 0x1E, 0x0E, 0xA7, 0x32, 0x5D }, /* FsVersion_16_0_3 */
{ 0x62, 0xC6, 0x5E, 0xFD, 0x9A, 0xBF, 0x7C, 0x43 }, /* FsVersion_16_0_3_Exfat */
{ 0x27, 0x07, 0x3B, 0xF0, 0xA1, 0xB8, 0xCE, 0x61 }, /* FsVersion_17_0_0 */
{ 0xEE, 0x0F, 0x4B, 0xAC, 0x6D, 0x1F, 0xFC, 0x4B }, /* FsVersion_17_0_0_Exfat */
{ 0x79, 0x5F, 0x5A, 0x5E, 0xB0, 0xC6, 0x77, 0x9E }, /* FsVersion_18_0_0 */
{ 0x1E, 0x2C, 0x64, 0xB1, 0xCC, 0xE2, 0x78, 0x24 }, /* FsVersion_18_0_0_Exfat */
};
const InitialProcessBinaryHeader *FindInitialProcessBinary(const pkg2::Package2Header *header, const u8 *data, ams::TargetFirmware target_firmware) {
if (target_firmware >= ams::TargetFirmware_8_0_0) {
if (target_firmware >= ams::TargetFirmware_17_0_0) {
const u32 *data_32 = reinterpret_cast<const u32 *>(data);
const u32 branch_target = (data_32[0] & 0x00FFFFFF);
for (size_t i = branch_target; i < branch_target + 0x1000 / sizeof(u32); ++i) {
const u32 ini_offset = (i * sizeof(u32)) + data_32[i];
if (data_32[i + 1] == 0 && ini_offset <= header->meta.payload_sizes[0] && std::memcmp(data + ini_offset, "INI1", 4) == 0) {
return reinterpret_cast<const InitialProcessBinaryHeader *>(data + ini_offset);
}
}
return nullptr;
} else if (target_firmware >= ams::TargetFirmware_8_0_0) {
/* Try to find initial process binary. */
const u32 *data_32 = reinterpret_cast<const u32 *>(data);
for (size_t i = 0; i < 0x1000 / sizeof(u32); ++i) {
@ -359,15 +391,6 @@ namespace ams::nxboot {
return nullptr;
}
/* Finds the registered initial process (kip) whose kip hash equals the given SHA-256 hash. */
/* Walks the singly-linked meta list headed by the file-scope g_initial_process_meta. */
/* Returns nullptr when no registered kip matches. */
InitialProcessMeta *FindInitialProcess(const se::Sha256Hash &hash) {
    for (InitialProcessMeta *cur = std::addressof(g_initial_process_meta); cur != nullptr; cur = cur->next) {
        /* Compare full hashes byte-for-byte. */
        if (std::memcmp(std::addressof(cur->kip_hash), std::addressof(hash), sizeof(hash)) == 0) {
            return cur;
        }
    }
    return nullptr;
}
u32 GetPatchSegments(const InitialProcessHeader *kip, u32 offset, size_t size) {
/* Create segment mask. */
u32 segments = 0;
@ -448,78 +471,6 @@ namespace ams::nxboot {
meta->patches_tail = new_patch;
}
/* Parses an IPS ("PATCH") record stream with 24-bit offsets, adding each record to the kip's patch list. */
/* Record layout: 3-byte big-endian offset, then 2-byte big-endian size. A size of zero denotes an RLE */
/* record: 2-byte big-endian run length followed by a single fill byte. Parsing stops at the "EOF" */
/* sentinel offset, or — new here — as soon as a record would extend past the end of the buffer, */
/* preventing out-of-bounds reads on truncated patch files (the old code only checked size > 0). */
void AddIps24PatchToKip(InitialProcessMeta *meta, const u8 *ips, s32 size) {
    while (size >= 3) {
        /* Read offset, stopping at EOF. */
        const u32 offset = (static_cast<u32>(ips[0]) << 16) | (static_cast<u32>(ips[1]) << 8) | (static_cast<u32>(ips[2]) << 0);
        if (offset == 0x454F46) { /* 'E','O','F' */
            break;
        }

        /* Guard: the record header (offset + size fields) requires 5 bytes. */
        if (size < 5) {
            break;
        }

        /* Read size. */
        const u16 cur_size = (static_cast<u32>(ips[3]) << 8) | (static_cast<u32>(ips[4]) << 0);
        if (cur_size > 0) {
            /* Guard: patch payload must fit within the remaining buffer. */
            if (size < 5 + static_cast<s32>(cur_size)) {
                break;
            }

            /* Add patch. */
            AddPatch(meta, offset, ips + 5, cur_size, false);

            /* Advance. */
            ips  += (5 + cur_size);
            size -= (5 + cur_size);
        } else {
            /* Guard: an RLE record is 8 bytes total (offset + size + run length + fill byte). */
            if (size < 8) {
                break;
            }

            /* Read RLE size. */
            const u16 rle_size = (static_cast<u32>(ips[5]) << 8) | (static_cast<u32>(ips[6]) << 0);

            /* Add patch. */
            AddPatch(meta, offset, ips + 7, rle_size, true);

            /* Advance. */
            ips  += 8;
            size -= 8;
        }
    }
}
/* Parses an IPS32 record stream (IPS variant with 32-bit offsets), adding each record to the kip's */
/* patch list. Record layout: 4-byte big-endian offset, then 2-byte big-endian size. A size of zero */
/* denotes an RLE record: 2-byte big-endian run length followed by a single fill byte. Parsing stops */
/* at the "EEOF" sentinel offset, or — new here — as soon as a record would extend past the end of */
/* the buffer, preventing out-of-bounds reads on truncated patch files (the old code only checked */
/* size > 0). */
void AddIps32PatchToKip(InitialProcessMeta *meta, const u8 *ips, s32 size) {
    while (size >= 4) {
        /* Read offset, stopping at EOF. */
        const u32 offset = (static_cast<u32>(ips[0]) << 24) | (static_cast<u32>(ips[1]) << 16) | (static_cast<u32>(ips[2]) << 8) | (static_cast<u32>(ips[3]) << 0);
        if (offset == 0x45454F46) { /* 'E','E','O','F' */
            break;
        }

        /* Guard: the record header (offset + size fields) requires 6 bytes. */
        if (size < 6) {
            break;
        }

        /* Read size. */
        const u16 cur_size = (static_cast<u32>(ips[4]) << 8) | (static_cast<u32>(ips[5]) << 0);
        if (cur_size > 0) {
            /* Guard: patch payload must fit within the remaining buffer. */
            if (size < 6 + static_cast<s32>(cur_size)) {
                break;
            }

            /* Add patch. */
            AddPatch(meta, offset, ips + 6, cur_size, false);

            /* Advance. */
            ips  += (6 + cur_size);
            size -= (6 + cur_size);
        } else {
            /* Guard: an RLE record is 9 bytes total (offset + size + run length + fill byte). */
            if (size < 9) {
                break;
            }

            /* Read RLE size. */
            const u16 rle_size = (static_cast<u32>(ips[6]) << 8) | (static_cast<u32>(ips[7]) << 0);

            /* Add patch. */
            AddPatch(meta, offset, ips + 8, rle_size, true);

            /* Advance. */
            ips  += 9;
            size -= 9;
        }
    }
}
/* Dispatches an IPS patch buffer to the correct parser based on its 5-byte magic. */
/* "PATCH" selects the classic 24-bit-offset format; "IPS32" selects the 32-bit-offset variant. */
/* Buffers with any other magic — or, new here, buffers too short to hold a magic at all, which */
/* previously caused std::memcmp to read out of bounds — are silently ignored. */
void AddIpsPatchToKip(InitialProcessMeta *meta, const u8 *ips, s32 size) {
    /* Guard: need at least the 5-byte magic. */
    if (size < 5) {
        return;
    }

    if (std::memcmp(ips, "PATCH", 5) == 0) {
        AddIps24PatchToKip(meta, ips + 5, size - 5);
    } else if (std::memcmp(ips, "IPS32", 5) == 0) {
        AddIps32PatchToKip(meta, ips + 5, size - 5);
    }
}
constexpr const u8 NogcPatch0[] = {
0x80
};
@ -656,6 +607,30 @@ namespace ams::nxboot {
AddPatch(fs_meta, 0x1913B9, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x16B950, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_16_0_3:
AddPatch(fs_meta, 0x186729, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x160CC0, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_16_0_3_Exfat:
AddPatch(fs_meta, 0x191409, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x16B9A0, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_17_0_0:
AddPatch(fs_meta, 0x18B149, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x165200, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_17_0_0_Exfat:
AddPatch(fs_meta, 0x195FA9, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x170060, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_18_0_0:
AddPatch(fs_meta, 0x18AF49, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x164B50, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_18_0_0_Exfat:
AddPatch(fs_meta, 0x195FD9, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x16FBE0, NogcPatch1, sizeof(NogcPatch1));
break;
default:
break;
}
@ -849,106 +824,6 @@ namespace ams::nxboot {
}
/* TODO ams.tma2: add mount_host patches. */
/* Add generic patches. */
{
/* Create patch path. */
char patch_path[0x220];
std::memcpy(patch_path, "sdmc:/atmosphere/kip_patches", 0x1D);
fs::DirectoryHandle patch_root_dir;
if (R_SUCCEEDED(fs::OpenDirectory(std::addressof(patch_root_dir), patch_path))) {
ON_SCOPE_EXIT { fs::CloseDirectory(patch_root_dir); };
s64 count;
fs::DirectoryEntry entries[1];
while (R_SUCCEEDED(fs::ReadDirectory(std::addressof(count), entries, patch_root_dir, util::size(entries))) && count > 0) {
/* Check that dir is a dir. */
if (fs::GetEntryType(entries[0]) != fs::DirectoryEntryType_Directory) {
continue;
}
/* For compatibility, ignore the old "default_nogc" patches. */
if (std::strcmp(entries[0].file_name, "default_nogc") == 0) {
continue;
}
/* Get filename length. */
const int dir_len = std::strlen(entries[0].file_name);
/* Adjust patch path. */
patch_path[0x1C] = '/';
std::memcpy(patch_path + 0x1D, entries[0].file_name, dir_len + 1);
/* Try to open the patch subdirectory. */
fs::DirectoryHandle patch_dir;
if (R_SUCCEEDED(fs::OpenDirectory(std::addressof(patch_dir), patch_path))) {
ON_SCOPE_EXIT { fs::CloseDirectory(patch_dir); };
/* Read patches. */
while (R_SUCCEEDED(fs::ReadDirectory(std::addressof(count), entries, patch_dir, util::size(entries))) && count > 0) {
/* Check that file is a file. */
if (fs::GetEntryType(entries[0]) != fs::DirectoryEntryType_File) {
continue;
}
/* Get filename length. */
const int name_len = std::strlen(entries[0].file_name);
/* Adjust patch path. */
patch_path[0x1D + dir_len] = '/';
std::memcpy(patch_path + 0x1D + dir_len + 1, entries[0].file_name, name_len + 1);
/* Check that file is "{hex}.ips" file. */
const int path_len = 0x1D + dir_len + 1 + name_len;
if (name_len != 0x44 || std::memcmp(patch_path + path_len - 4, ".ips", 5) != 0) {
continue;
}
/* Check that the filename is hex. */
bool valid_name = true;
se::Sha256Hash patch_name = {};
u32 shift = 4;
for (int i = 0; i < name_len - 4; ++i) {
const char c = entries[0].file_name[i];
u8 val;
if ('0' <= c && c <= '9') {
val = (c - '0');
} else if ('a' <= c && c <= 'f') {
val = (c - 'a') + 10;
} else if ('A' <= c && c <= 'F') {
val = (c - 'A') + 10;
} else {
valid_name = false;
break;
}
patch_name.bytes[i >> 1] |= val << shift;
shift ^= 4;
}
/* Ignore invalid patches. */
if (!valid_name) {
continue;
}
/* Find kip for the patch. */
auto *kip_meta = FindInitialProcess(patch_name);
if (kip_meta == nullptr) {
continue;
}
/* Read the ips patch. */
s64 file_size;
if (u8 *ips = static_cast<u8 *>(ReadFile(std::addressof(file_size), patch_path)); ips != nullptr) {
AddIpsPatchToKip(kip_meta, ips, static_cast<s32>(file_size));
}
}
}
}
}
}
}
/* Return the fs version we're using. */
@ -997,7 +872,20 @@ namespace ams::nxboot {
}
/* Set the embedded ini pointer. */
std::memcpy(payload_data + 8, std::addressof(meso_size), sizeof(meso_size));
const u32 magic = *reinterpret_cast<const u32 *>(payload_data + 4);
if (magic == MesoshereMetadataLayout0Magic) {
std::memcpy(payload_data + 8, std::addressof(meso_size), sizeof(meso_size));
} else if (magic == MesoshereMetadataLayout1Magic) {
if (const u32 meta_offset = *reinterpret_cast<const u32 *>(payload_data + 8); meta_offset <= meso_size - sizeof(meso_size)) {
s64 relative_offset = meso_size - meta_offset;
std::memcpy(payload_data + meta_offset, std::addressof(relative_offset), sizeof(relative_offset));
} else {
ShowFatalError("Invalid mesosphere metadata layout!\n");
}
} else {
ShowFatalError("Unknown mesosphere metadata version!\n");
}
/* Get the ini pointer. */
InitialProcessBinaryHeader * const ini = reinterpret_cast<InitialProcessBinaryHeader *>(payload_data + meso_size);

View file

@ -1002,14 +1002,16 @@ namespace ams::nxboot {
\
constexpr u32 SrcLow = RANGE_LOW(SRC_RANGE); \
constexpr u32 DstLow = RANGE_LOW(DST_RANGE); \
constexpr auto Shift = (SrcLow < DstLow) ? (DstLow - SrcLow) \
: (SrcLow - DstLow); \
\
cur_reg_value &= ~Mask; \
if constexpr (SrcLow == DstLow) { \
cur_reg_value |= (src_value & Mask); \
} else if constexpr (SrcLow < DstLow) { \
cur_reg_value |= ((src_value << (DstLow - SrcLow)) & Mask); \
cur_reg_value |= ((src_value << Shift) & Mask); \
} else { \
cur_reg_value |= ((src_value >> (SrcLow - DstLow)) & Mask); \
cur_reg_value |= ((src_value >> Shift) & Mask); \
} \
} \
}

View file

@ -6,7 +6,7 @@
[subrepo]
remote = https://github.com/Atmosphere-NX/Atmosphere-libs
branch = master
commit = b1607dc8a3d85bc8c859c60d70ebb4a3dcbb85b8
parent = f1ad26ce844ab7a63b948281b50c12e5d1dac8ce
commit = fadec2981727636ec7ba81d6c83995b7b9782190
parent = 410f23035efeb9e1bd399a020334793bba95bf91
method = merge
cmdver = 0.4.1

View file

@ -44,7 +44,7 @@ else ifeq ($(strip $(ATMOSPHERE_COMPILER_NAME)),clang)
export ATMOSPHERE_CFLAGS += -Wno-c99-designator -Wno-gnu-alignof-expression -Wno-unused-private-field
endif
export ATMOSPHERE_CXXFLAGS := -fno-rtti -fno-exceptions -std=gnu++20 -Wno-invalid-offsetof
export ATMOSPHERE_CXXFLAGS := -fno-rtti -fno-exceptions -std=gnu++23 -Wno-invalid-offsetof
export ATMOSPHERE_ASFLAGS :=

View file

@ -66,7 +66,7 @@ endif
ifeq ($(ATMOSPHERE_BOARD),nx-hac-001)
export LDFLAGS = -specs=$(ATMOSPHERE_LIBRARIES_DIR)/libstratosphere/stratosphere.specs -specs=$(DEVKITPRO)/libnx/switch.specs $(CXXFLAGS) $(CXXWRAPS) $(CXXREQUIRED) -Wl,-Map,$(notdir $*.map)
export LDFLAGS = -specs=$(ATMOSPHERE_LIBRARIES_DIR)/libstratosphere/stratosphere.specs -specs=$(DEVKITPRO)/libnx/switch.specs $(CXXFLAGS) $(CXXWRAPS) $(CXXREQUIRED) -Wl,-Map,$(notdir $*.map) -Wl,-z,relro,-z,now
else ifeq ($(ATMOSPHERE_OS_NAME),macos)
export LDFLAGS = $(CXXFLAGS) $(CXXWRAPS) $(CXXREQUIRED) -Wl,-map,$(notdir $@.map)
else

View file

@ -36,6 +36,8 @@ namespace ams::pkg1 {
KeyGeneration_14_0_0 = 0x0D,
KeyGeneration_15_0_0 = 0x0E,
KeyGeneration_16_0_0 = 0x0F,
KeyGeneration_17_0_0 = 0x10,
KeyGeneration_18_0_0 = 0x11,
KeyGeneration_Count,

View file

@ -23,8 +23,8 @@ namespace ams::pkg2 {
constexpr inline int PayloadCount = 3;
constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x17 in Nintendo's code. */
constexpr inline int CurrentBootloaderVersion = 0x13;
constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x18 in Nintendo's code. */
constexpr inline int CurrentBootloaderVersion = 0x15;
struct Package2Meta {
using Magic = util::FourCC<'P','K','2','1'>;

View file

@ -177,6 +177,7 @@ namespace ams::fuse {
}
constexpr const TargetFirmware FuseVersionIncrementFirmwares[] = {
TargetFirmware_17_0_0,
TargetFirmware_16_0_0,
TargetFirmware_15_0_0,
TargetFirmware_13_2_1,

View file

@ -19,13 +19,8 @@
namespace ams::kern::init {
struct alignas(util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)) KInitArguments {
u64 ttbr0;
u64 ttbr1;
u64 tcr;
u64 mair;
u64 cpuactlr;
u64 cpuectlr;
u64 sctlr;
u64 sp;
u64 entrypoint;
u64 argument;
@ -33,13 +28,8 @@ namespace ams::kern::init {
static_assert(alignof(KInitArguments) == util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE));
static_assert(sizeof(KInitArguments) == std::max(INIT_ARGUMENTS_SIZE, util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)));
static_assert(AMS_OFFSETOF(KInitArguments, ttbr0) == INIT_ARGUMENTS_TTBR0);
static_assert(AMS_OFFSETOF(KInitArguments, ttbr1) == INIT_ARGUMENTS_TTBR1);
static_assert(AMS_OFFSETOF(KInitArguments, tcr) == INIT_ARGUMENTS_TCR);
static_assert(AMS_OFFSETOF(KInitArguments, mair) == INIT_ARGUMENTS_MAIR);
static_assert(AMS_OFFSETOF(KInitArguments, cpuactlr) == INIT_ARGUMENTS_CPUACTLR);
static_assert(AMS_OFFSETOF(KInitArguments, cpuectlr) == INIT_ARGUMENTS_CPUECTLR);
static_assert(AMS_OFFSETOF(KInitArguments, sctlr) == INIT_ARGUMENTS_SCTLR);
static_assert(AMS_OFFSETOF(KInitArguments, sp) == INIT_ARGUMENTS_SP);
static_assert(AMS_OFFSETOF(KInitArguments, entrypoint) == INIT_ARGUMENTS_ENTRYPOINT);
static_assert(AMS_OFFSETOF(KInitArguments, argument) == INIT_ARGUMENTS_ARGUMENT);

View file

@ -87,9 +87,8 @@ namespace ams::kern::arch::arm64::init {
template<IsInitialPageAllocator PageAllocator>
static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator, u64 phys_to_virt_offset) {
auto address = allocator.Allocate(PageSize);
ClearNewPageTable(address, phys_to_virt_offset);
return address;
MESOSPHERE_UNUSED(phys_to_virt_offset);
return allocator.Allocate(PageSize);
}
static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address, u64 phys_to_virt_offset) {
@ -883,6 +882,12 @@ namespace ams::kern::arch::arm64::init {
const size_t ind_max = ((aligned_end - aligned_start) / align) - 1;
while (true) {
if (const uintptr_t random_address = aligned_start + (KSystemControl::Init::GenerateRandomRange(0, ind_max) * align); this->TryAllocate(random_address, size)) {
/* Clear the allocated pages. */
volatile u64 *ptr = reinterpret_cast<volatile u64 *>(random_address);
for (size_t i = 0; i < size / sizeof(u64); ++i) {
ptr[i] = 0;
}
return random_address;
}
}

View file

@ -94,3 +94,8 @@ label_done:
ENABLE_FPU(xtmp1) \
GET_THREAD_CONTEXT_AND_RESTORE_FPCR_FPSR(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
RESTORE_FPU32_ALL_REGISTERS(ctx, xtmp1)
#define ERET_WITH_SPECULATION_BARRIER \
eret; \
dsb nsh; \
isb

View file

@ -246,17 +246,12 @@
#define THREAD_LOCAL_REGION_SIZE 0x200
/* ams::kern::init::KInitArguments, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp */
#define INIT_ARGUMENTS_SIZE 0x50
#define INIT_ARGUMENTS_TTBR0 0x00
#define INIT_ARGUMENTS_TTBR1 0x08
#define INIT_ARGUMENTS_TCR 0x10
#define INIT_ARGUMENTS_MAIR 0x18
#define INIT_ARGUMENTS_CPUACTLR 0x20
#define INIT_ARGUMENTS_CPUECTLR 0x28
#define INIT_ARGUMENTS_SCTLR 0x30
#define INIT_ARGUMENTS_SP 0x38
#define INIT_ARGUMENTS_ENTRYPOINT 0x40
#define INIT_ARGUMENTS_ARGUMENT 0x48
#define INIT_ARGUMENTS_SIZE 0x28
#define INIT_ARGUMENTS_CPUACTLR 0x00
#define INIT_ARGUMENTS_CPUECTLR 0x08
#define INIT_ARGUMENTS_SP 0x10
#define INIT_ARGUMENTS_ENTRYPOINT 0x18
#define INIT_ARGUMENTS_ARGUMENT 0x20
/* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
/* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */

View file

@ -372,6 +372,10 @@ namespace ams::kern::arch::arm64::cpu {
this->SetBit(19, en);
return *this;
}
/* Returns whether bit 19 of the register value is set. Per the accessor's name this is the */
/* WXN (write-implies-execute-never) bit — NOTE(review): the enclosing register type is outside */
/* this view; confirm it models SCTLR, where WXN is bit 19. */
constexpr ALWAYS_INLINE bool GetWxn() const {
    return this->GetBits(19, 1) != 0;
}
};
/* Accessors for timer registers. */

View file

@ -178,7 +178,7 @@ namespace ams::kern::arch::arm64 {
}
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
Result Finalize();
private:
Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
@ -208,8 +208,8 @@ namespace ams::kern::arch::arm64 {
}
}
Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);

View file

@ -30,6 +30,7 @@ namespace ams::kern::arch::arm64 {
KPhysicalAddress phys_addr;
size_t block_size;
u8 sw_reserved_bits;
u8 attr;
constexpr bool IsHeadMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHead) != 0; }
constexpr bool IsHeadAndBodyMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }

View file

@ -28,8 +28,8 @@ namespace ams::kern::arch::arm64 {
m_page_table.Activate(id);
}
Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, system_resource, resource_limit));
Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit));
}
void Finalize() { m_page_table.Finalize(); }
@ -110,12 +110,12 @@ namespace ams::kern::arch::arm64 {
R_RETURN(m_page_table.MapRegion(region_type, perm));
}
Result MapInsecureMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.MapInsecureMemory(address, size));
Result MapInsecurePhysicalMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.MapInsecurePhysicalMemory(address, size));
}
Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
Result UnmapInsecurePhysicalMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnmapInsecurePhysicalMemory(address, size));
}
Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
@ -150,20 +150,24 @@ namespace ams::kern::arch::arm64 {
R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
}
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.InvalidateCurrentProcessDataCache(address, size));
}
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size));
}
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.ReadDebugIoMemory(buffer, address, size));
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) {
R_RETURN(m_page_table.ReadDebugIoMemory(buffer, address, size, state));
}
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
R_RETURN(m_page_table.WriteDebugMemory(address, buffer, size));
}
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size) {
R_RETURN(m_page_table.WriteDebugIoMemory(address, buffer, size));
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state) {
R_RETURN(m_page_table.WriteDebugIoMemory(address, buffer, size, state));
}
Result LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap) {
@ -296,6 +300,7 @@ namespace ams::kern::arch::arm64 {
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInUnsafeAliasRegion(addr, size); }
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return m_page_table.CanContain(addr, size, state); }
bool CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const { return m_page_table.CanContain(addr, size, state); }
KProcessAddress GetAddressSpaceStart() const { return m_page_table.GetAddressSpaceStart(); }
KProcessAddress GetHeapRegionStart() const { return m_page_table.GetHeapRegionStart(); }
@ -311,6 +316,8 @@ namespace ams::kern::arch::arm64 {
size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }
size_t GetAliasRegionExtraSize() const { return m_page_table.GetAliasRegionExtraSize(); }
size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }
size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }

View file

@ -23,9 +23,8 @@ namespace ams::kern::arch::arm64 {
class KSupervisorPageTable {
private:
KPageTable m_page_table;
u64 m_ttbr0_identity[cpu::NumCores];
public:
constexpr KSupervisorPageTable() : m_page_table(util::ConstantInitialize), m_ttbr0_identity() { /* ... */ }
constexpr KSupervisorPageTable() : m_page_table(util::ConstantInitialize) { /* ... */ }
NOINLINE void Initialize(s32 core_id);
@ -61,8 +60,6 @@ namespace ams::kern::arch::arm64 {
return m_page_table.GetPhysicalAddress(out, address);
}
constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return m_ttbr0_identity[core_id]; }
void DumpMemoryBlocks() const {
return m_page_table.DumpMemoryBlocks();
}

View file

@ -16,11 +16,10 @@
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
namespace ams::kern::arch::arm64::smc {
template<int SmcId, bool DisableInterrupt>
template<int SmcId>
void SecureMonitorCall(u64 *buf) {
/* Load arguments into registers. */
register u64 x0 asm("x0") = buf[0];
@ -32,34 +31,18 @@ namespace ams::kern::arch::arm64::smc {
register u64 x6 asm("x6") = buf[6];
register u64 x7 asm("x7") = buf[7];
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
/* Perform the call. */
if constexpr (DisableInterrupt) {
KScopedInterruptDisable di;
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
} else {
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
}
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
/* Store arguments to output. */
buf[0] = x0;
@ -78,18 +61,18 @@ namespace ams::kern::arch::arm64::smc {
PsciFunction_CpuOn = 0xC4000003,
};
template<int SmcId, bool DisableInterrupt>
template<int SmcId>
u64 PsciCall(PsciFunction function, u64 x1 = 0, u64 x2 = 0, u64 x3 = 0, u64 x4 = 0, u64 x5 = 0, u64 x6 = 0, u64 x7 = 0) {
ams::svc::lp64::SecureMonitorArguments args = { { function, x1, x2, x3, x4, x5, x6, x7 } };
SecureMonitorCall<SmcId, DisableInterrupt>(args.r);
SecureMonitorCall<SmcId>(args.r);
return args.r[0];
}
template<int SmcId, bool DisableInterrupt>
template<int SmcId>
u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
return PsciCall<SmcId, DisableInterrupt>(PsciFunction_CpuOn, core_id, entrypoint, arg);
return PsciCall<SmcId>(PsciFunction_CpuOn, core_id, entrypoint, arg);
}
}

View file

@ -114,6 +114,23 @@ namespace ams::kern::init::Elf::Elf64 {
}
};
/* An entry in an ELF RELR (compressed relative relocation) section. */
/* An entry with the low bit clear encodes an address to relocate; an entry with the low bit */
/* set is a bitmap (low marker bit stripped) describing which subsequent word slots to relocate. */
class Relr {
    private:
        Xword m_info;
    public:
        /* Whether this entry is a relocation location (low bit clear) rather than a bitmap. */
        constexpr ALWAYS_INLINE bool IsLocation() const {
            return (m_info & 1) == 0;
        }

        /* The raw location value; meaningful only when IsLocation() is true. */
        constexpr ALWAYS_INLINE Xword GetLocation() const {
            return m_info;
        }

        /* The bitmap payload with the low marker bit shifted out; meaningful only for bitmap entries. */
        constexpr ALWAYS_INLINE Xword GetBitmap() const {
            return m_info >> 1;
        }
};
enum DynamicTag {
DT_NULL = 0,
DT_RELA = 7,
@ -121,6 +138,10 @@ namespace ams::kern::init::Elf::Elf64 {
DT_REL = 17,
DT_RELENT = 19,
DT_RELRSZ = 35,
DT_RELR = 36,
DT_RELRENT = 37,
DT_RELACOUNT = 0x6ffffff9,
DT_RELCOUNT = 0x6ffffffa
};

View file

@ -31,8 +31,22 @@ namespace ams::kern::init {
u32 dynamic_offset;
u32 init_array_offset;
u32 init_array_end_offset;
u32 sysreg_offset;
};
static_assert(util::is_pod<KernelLayout>::value);
static_assert(sizeof(KernelLayout) == 0x30);
static_assert(sizeof(KernelLayout) == 0x34);
#if defined(ATMOSPHERE_ARCH_ARM64)
struct KernelSystemRegisters {
u64 ttbr0_el1;
u64 ttbr1_el1;
u64 tcr_el1;
u64 mair_el1;
u64 sctlr_el1;
};
#else
struct KernelSystemRegisters {
};
#endif
}

View file

@ -32,10 +32,17 @@ namespace ams::kern {
struct InitialProcessBinaryLayout {
uintptr_t address;
uintptr_t _08;
uintptr_t kern_address;
};
struct InitialProcessBinaryLayoutWithSize {
InitialProcessBinaryLayout layout;
size_t size;
};
KPhysicalAddress GetInitialProcessBinaryPhysicalAddress();
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr);
size_t GetInitialProcessBinarySize();
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr, size_t size);
u64 GetInitialProcessIdMin();
u64 GetInitialProcessIdMax();

View file

@ -79,7 +79,7 @@ namespace ams::kern {
}
ALWAYS_INLINE bool Close() {
/* Atomically decrement the reference count, not allowing it to become negative. */
/* Atomically decrement the reference count, not allowing it to decrement past zero. */
u32 cur = m_value.Load<std::memory_order_relaxed>();
do {
MESOSPHERE_ABORT_UNLESS(cur > 0);
@ -257,7 +257,7 @@ namespace ams::kern {
class KScopedAutoObject {
NON_COPYABLE(KScopedAutoObject);
private:
template<typename U>
template<typename U> requires std::derived_from<U, KAutoObject>
friend class KScopedAutoObject;
private:
T *m_obj;
@ -358,7 +358,7 @@ namespace ams::kern {
void Detach() {
/* Close our object, if we have one. */
if (T * const object = m_object; AMS_LIKELY(object != nullptr)) {
/* Set our object to a debug sentinel value, which will cause crash if accessed. */
/* Set our object to a debug sentinel value, which will cause a crash if accessed. */
m_object = reinterpret_cast<T *>(1);
/* Close reference to our object. */

View file

@ -133,7 +133,7 @@ namespace ams::kern {
}
Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const;
void Load(const KPageGroup &pg, KVirtualAddress data) const;
Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const;
};

View file

@ -44,6 +44,7 @@ namespace ams::kern {
KMemoryState_FlagCanChangeAttribute = (1 << 24),
KMemoryState_FlagCanCodeMemory = (1 << 25),
KMemoryState_FlagLinearMapped = (1 << 26),
KMemoryState_FlagCanPermissionLock = (1 << 27),
KMemoryState_FlagsData = KMemoryState_FlagCanReprotect | KMemoryState_FlagCanUseIpc |
KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
@ -66,18 +67,22 @@ namespace ams::kern {
KMemoryState_Free = ams::svc::MemoryState_Free,
KMemoryState_Io = ams::svc::MemoryState_Io | KMemoryState_FlagMapped | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_Static = ams::svc::MemoryState_Static | KMemoryState_FlagMapped | KMemoryState_FlagCanQueryPhysical,
KMemoryState_IoMemory = ams::svc::MemoryState_Io | KMemoryState_FlagMapped | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_IoRegister = ams::svc::MemoryState_Io | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_Static = ams::svc::MemoryState_Static | KMemoryState_FlagCanQueryPhysical,
KMemoryState_Code = ams::svc::MemoryState_Code | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess,
KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory,
KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory | KMemoryState_FlagCanPermissionLock,
KMemoryState_Normal = ams::svc::MemoryState_Normal | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory,
KMemoryState_Shared = ams::svc::MemoryState_Shared | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
/* KMemoryState_Alias was removed after 1.0.0. */
KMemoryState_AliasCode = ams::svc::MemoryState_AliasCode | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias,
KMemoryState_AliasCodeData = ams::svc::MemoryState_AliasCodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory,
KMemoryState_AliasCodeData = ams::svc::MemoryState_AliasCodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory
| KMemoryState_FlagCanPermissionLock,
KMemoryState_Ipc = ams::svc::MemoryState_Ipc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
@ -85,7 +90,7 @@ namespace ams::kern {
KMemoryState_Stack = ams::svc::MemoryState_Stack | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagLinearMapped,
KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagLinearMapped,
KMemoryState_Transfered = ams::svc::MemoryState_Transfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
@ -104,43 +109,44 @@ namespace ams::kern {
KMemoryState_NonDeviceIpc = ams::svc::MemoryState_NonDeviceIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_Kernel = ams::svc::MemoryState_Kernel | KMemoryState_FlagMapped,
KMemoryState_Kernel = ams::svc::MemoryState_Kernel,
KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug | KMemoryState_FlagLinearMapped,
KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
KMemoryState_Coverage = ams::svc::MemoryState_Coverage | KMemoryState_FlagMapped,
KMemoryState_Insecure = ams::svc::MemoryState_Insecure | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap
KMemoryState_Insecure = ams::svc::MemoryState_Insecure | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanQueryPhysical
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
};
#if 1
static_assert(KMemoryState_Free == 0x00000000);
static_assert(KMemoryState_Io == 0x00182001);
static_assert(KMemoryState_Static == 0x00042002);
static_assert(KMemoryState_IoMemory == 0x00182001);
static_assert(KMemoryState_IoRegister == 0x00180001);
static_assert(KMemoryState_Static == 0x00040002);
static_assert(KMemoryState_Code == 0x04DC7E03);
static_assert(KMemoryState_CodeData == 0x07FEBD04);
static_assert(KMemoryState_CodeData == 0x0FFEBD04);
static_assert(KMemoryState_Normal == 0x077EBD05);
static_assert(KMemoryState_Shared == 0x04402006);
static_assert(KMemoryState_AliasCode == 0x04DD7E08);
static_assert(KMemoryState_AliasCodeData == 0x07FFBD09);
static_assert(KMemoryState_AliasCodeData == 0x0FFFBD09);
static_assert(KMemoryState_Ipc == 0x045C3C0A);
static_assert(KMemoryState_Stack == 0x045C3C0B);
static_assert(KMemoryState_ThreadLocal == 0x0400200C);
static_assert(KMemoryState_ThreadLocal == 0x0400000C);
static_assert(KMemoryState_Transfered == 0x055C3C0D);
static_assert(KMemoryState_SharedTransfered == 0x045C380E);
static_assert(KMemoryState_SharedCode == 0x0440380F);
static_assert(KMemoryState_Inaccessible == 0x00000010);
static_assert(KMemoryState_NonSecureIpc == 0x045C3811);
static_assert(KMemoryState_NonDeviceIpc == 0x044C2812);
static_assert(KMemoryState_Kernel == 0x00002013);
static_assert(KMemoryState_Kernel == 0x00000013);
static_assert(KMemoryState_GeneratedCode == 0x04402214);
static_assert(KMemoryState_CodeOut == 0x04402015);
static_assert(KMemoryState_Coverage == 0x00002016);
static_assert(KMemoryState_Insecure == 0x05583817);
static_assert(KMemoryState_Coverage == 0x00002016); /* TODO: Is this correct? */
static_assert(KMemoryState_Insecure == 0x055C3817);
#endif
enum KMemoryPermission : u8 {
@ -171,20 +177,21 @@ namespace ams::kern {
};
constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
return static_cast<KMemoryPermission>((util::ToUnderlying(perm) & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((util::ToUnderlying(perm) & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
return static_cast<KMemoryPermission>((util::ToUnderlying(perm) & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((util::ToUnderlying(perm) & ams::svc::MemoryPermission_Write) ? KMemoryPermission_KernelWrite : KMemoryPermission_None) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
}
enum KMemoryAttribute : u8 {
KMemoryAttribute_None = 0x00,
KMemoryAttribute_All = 0xFF,
KMemoryAttribute_UserMask = KMemoryAttribute_All,
KMemoryAttribute_None = 0x00,
KMemoryAttribute_All = 0xFF,
KMemoryAttribute_UserMask = KMemoryAttribute_All,
KMemoryAttribute_Locked = ams::svc::MemoryAttribute_Locked,
KMemoryAttribute_IpcLocked = ams::svc::MemoryAttribute_IpcLocked,
KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared,
KMemoryAttribute_Uncached = ams::svc::MemoryAttribute_Uncached,
KMemoryAttribute_Locked = ams::svc::MemoryAttribute_Locked,
KMemoryAttribute_IpcLocked = ams::svc::MemoryAttribute_IpcLocked,
KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared,
KMemoryAttribute_Uncached = ams::svc::MemoryAttribute_Uncached,
KMemoryAttribute_PermissionLocked = ams::svc::MemoryAttribute_PermissionLocked,
KMemoryAttribute_SetMask = KMemoryAttribute_Uncached,
KMemoryAttribute_SetMask = KMemoryAttribute_Uncached | KMemoryAttribute_PermissionLocked,
};
enum KMemoryBlockDisableMergeAttribute : u8 {
@ -258,6 +265,10 @@ namespace ams::kern {
return m_state;
}
constexpr ams::svc::MemoryState GetSvcState() const {
return static_cast<ams::svc::MemoryState>(m_state & KMemoryState_Mask);
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
@ -320,6 +331,10 @@ namespace ams::kern {
return this->GetEndAddress() - 1;
}
constexpr KMemoryState GetState() const {
return m_memory_state;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
@ -439,6 +454,14 @@ namespace ams::kern {
}
}
constexpr void UpdateAttribute(u32 mask, u32 attr) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT((mask & KMemoryAttribute_IpcLocked) == 0);
MESOSPHERE_ASSERT((mask & KMemoryAttribute_DeviceShared) == 0);
m_attribute = static_cast<KMemoryAttribute>((m_attribute & ~mask) | attr);
}
constexpr void Split(KMemoryBlock *block, KProcessAddress addr) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->GetAddress() < addr);

View file

@ -104,7 +104,9 @@ namespace ams::kern {
void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateAttribute(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, u32 mask, u32 attr);
iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(util::ConstantInitialize, address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None));

View file

@ -185,7 +185,7 @@ namespace ams::kern {
}
}
Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random);
Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
public:
KMemoryManager()
: m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
@ -199,7 +199,7 @@ namespace ams::kern {
NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option);
NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
Pool GetPool(KPhysicalAddress address) const {

View file

@ -212,7 +212,9 @@ namespace ams::kern {
static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
constexpr inline const auto KMemoryRegionType_DramKernelSecureAppletMemory = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(KMemoryRegionAttr_LinearMapped);
constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() == (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
constexpr inline const auto KMemoryRegionType_DramReservedEarly = KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == (0x16 | KMemoryRegionAttr_NoUserMap));
@ -228,53 +230,55 @@ namespace ams::kern {
constexpr inline const auto KMemoryRegionType_DramPoolPartition = KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline const auto KMemoryRegionType_DramPoolManagement = KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(KMemoryRegionAttr_CarveoutProtected);
constexpr inline const auto KMemoryRegionType_DramUserPool = KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramUserPool.GetValue() == (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline const auto KMemoryRegionType_DramPoolManagement = KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
/* UNUSED: .Derive(4, 1); */
/* UNUSED: .Derive(4, 2); */
constexpr inline const auto KMemoryRegionType_DramUserPool = KMemoryRegionType_DramPoolPartition.Derive(4, 3);
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramUserPool .GetValue() == (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline const auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_DramSystemNonSecurePool = KMemoryRegionType_DramUserPool.Derive(4, 2);
constexpr inline const auto KMemoryRegionType_DramSystemPool = KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
static_assert(KMemoryRegionType_DramApplicationPool .GetValue() == (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramAppletPool .GetValue() == (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemPool .GetValue() == (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramApplicationPool .GetValue() == (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramAppletPool .GetValue() == (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemPool .GetValue() == (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
constexpr inline const auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelPtHeap = KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelTraceBuffer = KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelPtHeap = KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelTraceBuffer = KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
static_assert(KMemoryRegionType_VirtualDramHeapBase .GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap .GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
/* UNUSED: .DeriveSparse(2, 2, 0); */
constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
/* UNUSED: .Derive(4, 2); */
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown = KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug .GetValue() == (0x32));
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown .GetValue() == (0x92));
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramUserPool = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt .GetValue() == 0x19A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
static_assert(KMemoryRegionType_VirtualDramUserPool .GetValue() == 0x31A);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramUserPool = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
/* UNUSED: .Derive(4, 3); */
static_assert(KMemoryRegionType_VirtualDramKernelInitPt .GetValue() == 0x31A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
static_assert(KMemoryRegionType_VirtualDramUserPool .GetValue() == 0x61A);
/* NOTE: For unknown reason, the pools are derived out-of-order here. */
/* It's worth eventually trying to understand why Nintendo made this choice. */
/* UNUSED: .Derive(6, 0); */
/* UNUSED: .Derive(6, 1); */
constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
static_assert(KMemoryRegionType_VirtualDramAppletPool .GetValue() == 0x1B1A);
static_assert(KMemoryRegionType_VirtualDramApplicationPool .GetValue() == 0x271A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
static_assert(KMemoryRegionType_VirtualDramSystemPool .GetValue() == 0x331A);
constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramApplicationPool .GetValue() == 0x361A);
static_assert(KMemoryRegionType_VirtualDramAppletPool .GetValue() == 0x561A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
static_assert(KMemoryRegionType_VirtualDramSystemPool .GetValue() == 0x961A);
constexpr inline const auto KMemoryRegionType_ArchDeviceBase = KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
constexpr inline const auto KMemoryRegionType_BoardDeviceBase = KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
@ -328,12 +332,14 @@ namespace ams::kern {
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
} else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
} else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureUnknown;
} else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
return KMemoryRegionType_VirtualDramUnknownDebug;
} else {

View file

@ -145,6 +145,8 @@ namespace ams::kern {
bool IsEquivalentTo(const KPageGroup &rhs) const;
Result CopyRangeTo(KPageGroup &out, size_t offset, size_t size) const;
ALWAYS_INLINE bool operator==(const KPageGroup &rhs) const {
return this->IsEquivalentTo(rhs);
}
@ -158,8 +160,16 @@ namespace ams::kern {
private:
const KPageGroup *m_pg;
public:
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : m_pg(gp) { if (m_pg) { m_pg->Open(); } }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp, bool not_first = true) : m_pg(gp) {
if (m_pg) {
if (not_first) {
m_pg->Open();
} else {
m_pg->OpenFirst();
}
}
}
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp, bool not_first = true) : KScopedPageGroup(std::addressof(gp), not_first) { /* ... */ }
ALWAYS_INLINE ~KScopedPageGroup() { if (m_pg) { m_pg->Close(); } }
ALWAYS_INLINE void CancelClose() {

View file

@ -57,11 +57,29 @@ namespace ams::kern {
using TraversalEntry = KPageTableImpl::TraversalEntry;
using TraversalContext = KPageTableImpl::TraversalContext;
struct MemoryRange {
KPhysicalAddress address;
size_t size;
class MemoryRange {
private:
KPhysicalAddress m_address;
size_t m_size;
bool m_heap;
u8 m_attr;
public:
constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false), m_attr(0) { /* ... */ }
void Close();
void Set(KPhysicalAddress address, size_t size, bool heap, u8 attr) {
m_address = address;
m_size = size;
m_heap = heap;
m_attr = attr;
}
constexpr KPhysicalAddress GetAddress() const { return m_address; }
constexpr size_t GetSize() const { return m_size; }
constexpr bool IsHeap() const { return m_heap; }
constexpr u8 GetAttribute() const { return m_attr; }
void Open();
void Close();
};
protected:
enum MemoryFillValue {
@ -71,10 +89,19 @@ namespace ams::kern {
MemoryFillValue_Heap = 'Z',
};
enum RegionType {
RegionType_KernelMap = 0,
RegionType_Stack = 1,
RegionType_Alias = 2,
RegionType_Heap = 3,
RegionType_Count,
};
enum OperationType {
OperationType_Map = 0,
OperationType_MapFirst = 1,
OperationType_MapGroup = 2,
OperationType_MapGroup = 1,
OperationType_MapFirstGroup = 2,
OperationType_Unmap = 3,
OperationType_ChangePermissions = 4,
OperationType_ChangePermissionsAndRefresh = 5,
@ -150,15 +177,9 @@ namespace ams::kern {
private:
KProcessAddress m_address_space_start;
KProcessAddress m_address_space_end;
KProcessAddress m_heap_region_start;
KProcessAddress m_heap_region_end;
KProcessAddress m_region_starts[RegionType_Count];
KProcessAddress m_region_ends[RegionType_Count];
KProcessAddress m_current_heap_end;
KProcessAddress m_alias_region_start;
KProcessAddress m_alias_region_end;
KProcessAddress m_stack_region_start;
KProcessAddress m_stack_region_end;
KProcessAddress m_kernel_map_region_start;
KProcessAddress m_kernel_map_region_end;
KProcessAddress m_alias_code_region_start;
KProcessAddress m_alias_code_region_end;
KProcessAddress m_code_region_start;
@ -168,6 +189,7 @@ namespace ams::kern {
size_t m_mapped_unsafe_physical_memory;
size_t m_mapped_insecure_memory;
size_t m_mapped_ipc_server_memory;
size_t m_alias_region_extra_size;
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
KLightLock m_device_map_lock;
@ -188,12 +210,12 @@ namespace ams::kern {
MemoryFillValue m_stack_fill_value;
public:
constexpr explicit KPageTableBase(util::ConstantInitializeTag)
: m_address_space_start(Null<KProcessAddress>), m_address_space_end(Null<KProcessAddress>), m_heap_region_start(Null<KProcessAddress>),
m_heap_region_end(Null<KProcessAddress>), m_current_heap_end(Null<KProcessAddress>), m_alias_region_start(Null<KProcessAddress>),
m_alias_region_end(Null<KProcessAddress>), m_stack_region_start(Null<KProcessAddress>), m_stack_region_end(Null<KProcessAddress>),
m_kernel_map_region_start(Null<KProcessAddress>), m_kernel_map_region_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
: m_address_space_start(Null<KProcessAddress>), m_address_space_end(Null<KProcessAddress>),
m_region_starts{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
m_region_ends{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
m_current_heap_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
m_alias_code_region_end(Null<KProcessAddress>), m_code_region_start(Null<KProcessAddress>), m_code_region_end(Null<KProcessAddress>),
m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(),
m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(), m_alias_region_extra_size(),
m_general_lock(), m_map_physical_memory_lock(), m_device_map_lock(), m_impl(util::ConstantInitialize), m_memory_block_manager(util::ConstantInitialize),
m_allocate_option(), m_address_space_width(), m_is_kernel(), m_enable_aslr(), m_enable_device_address_space_merge(),
m_memory_block_slab_manager(), m_block_info_manager(), m_resource_limit(), m_cached_physical_linear_region(), m_cached_physical_heap_region(),
@ -205,7 +227,7 @@ namespace ams::kern {
explicit KPageTableBase() { /* ... */ }
NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
void Finalize();
@ -221,21 +243,25 @@ namespace ams::kern {
}
constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
return this->Contains(addr, size) && m_alias_region_start <= addr && addr + size - 1 <= m_alias_region_end - 1;
return this->Contains(addr, size) && m_region_starts[RegionType_Alias] <= addr && addr + size - 1 <= m_region_ends[RegionType_Alias] - 1;
}
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
/* Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the alias code region. */
return this->CanContain(addr, size, KMemoryState_AliasCode);
return this->CanContain(addr, size, ams::svc::MemoryState_AliasCode);
}
ALWAYS_INLINE KScopedLightLock AcquireDeviceMapLock() {
return KScopedLightLock(m_device_map_lock);
}
KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
KProcessAddress GetRegionAddress(ams::svc::MemoryState state) const;
size_t GetRegionSize(ams::svc::MemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const;
ALWAYS_INLINE KProcessAddress GetRegionAddress(KMemoryState state) const { return this->GetRegionAddress(static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
ALWAYS_INLINE size_t GetRegionSize(KMemoryState state) const { return this->GetRegionSize(static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
ALWAYS_INLINE bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->CanContain(addr, size, static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
protected:
/* NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions */
/* in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived */
@ -293,6 +319,7 @@ namespace ams::kern {
}
Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
@ -306,9 +333,9 @@ namespace ams::kern {
Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const;
Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, KMemoryState state) const;
Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const;
Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, KMemoryPermission perm);
Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties &properties);
Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);
void RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg);
@ -320,9 +347,9 @@ namespace ams::kern {
NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
Result MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size);
Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size);
Result MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryState state, KMemoryPermission perm);
Result ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size, KMemoryState state);
Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size, KMemoryState state);
Result SetupForIpcClient(PageLinkedList *page_list, size_t *out_blocks_needed, KProcessAddress address, size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
Result SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send);
@ -356,8 +383,8 @@ namespace ams::kern {
Result SetMaxHeapSize(size_t size);
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const;
Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const;
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, KMemoryState_Static)); }
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, KMemoryState_Io)); }
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, ams::svc::MemoryState_Static)); }
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, ams::svc::MemoryState_Io)); }
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
@ -367,8 +394,8 @@ namespace ams::kern {
Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping);
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
Result MapInsecureMemory(KProcessAddress address, size_t size);
Result UnmapInsecureMemory(KProcessAddress address, size_t size);
Result MapInsecurePhysicalMemory(KProcessAddress address, size_t size);
Result UnmapInsecurePhysicalMemory(KProcessAddress address, size_t size);
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm));
@ -392,12 +419,13 @@ namespace ams::kern {
Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr);
Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size);
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size);
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state);
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size);
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size);
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state);
Result LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap);
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
@ -458,24 +486,30 @@ namespace ams::kern {
}
public:
KProcessAddress GetAddressSpaceStart() const { return m_address_space_start; }
KProcessAddress GetHeapRegionStart() const { return m_heap_region_start; }
KProcessAddress GetAliasRegionStart() const { return m_alias_region_start; }
KProcessAddress GetStackRegionStart() const { return m_stack_region_start; }
KProcessAddress GetKernelMapRegionStart() const { return m_kernel_map_region_start; }
KProcessAddress GetHeapRegionStart() const { return m_region_starts[RegionType_Heap]; }
KProcessAddress GetAliasRegionStart() const { return m_region_starts[RegionType_Alias]; }
KProcessAddress GetStackRegionStart() const { return m_region_starts[RegionType_Stack]; }
KProcessAddress GetKernelMapRegionStart() const { return m_region_starts[RegionType_KernelMap]; }
KProcessAddress GetAliasCodeRegionStart() const { return m_alias_code_region_start; }
size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; }
size_t GetHeapRegionSize() const { return m_heap_region_end - m_heap_region_start; }
size_t GetAliasRegionSize() const { return m_alias_region_end - m_alias_region_start; }
size_t GetStackRegionSize() const { return m_stack_region_end - m_stack_region_start; }
size_t GetKernelMapRegionSize() const { return m_kernel_map_region_end - m_kernel_map_region_start; }
size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; }
size_t GetHeapRegionSize() const { return m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap]; }
size_t GetAliasRegionSize() const { return m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias]; }
size_t GetStackRegionSize() const { return m_region_ends[RegionType_Stack] - m_region_starts[RegionType_Stack]; }
size_t GetKernelMapRegionSize() const { return m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap]; }
size_t GetAliasCodeRegionSize() const { return m_alias_code_region_end - m_alias_code_region_start; }
size_t GetAliasRegionExtraSize() const { return m_alias_region_extra_size; }
size_t GetNormalMemorySize() const {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
return (m_current_heap_end - m_region_starts[RegionType_Heap]) + m_mapped_physical_memory_size;
}
size_t GetCodeSize() const;

View file

@ -76,6 +76,7 @@ namespace ams::kern {
bool m_is_signaled;
bool m_is_initialized;
bool m_is_application;
bool m_is_default_application_system_resource;
char m_name[13];
util::Atomic<u16> m_num_running_threads;
u32 m_flags;
@ -178,6 +179,8 @@ namespace ams::kern {
constexpr bool IsApplication() const { return m_is_application; }
constexpr bool IsDefaultApplicationSystemResource() const { return m_is_default_application_system_resource; }
constexpr bool IsSuspended() const { return m_is_suspended; }
constexpr void SetSuspended(bool suspended) { m_is_suspended = suspended; }
@ -280,12 +283,20 @@ namespace ams::kern {
void IncrementRunningThreadCount();
void DecrementRunningThreadCount();
size_t GetRequiredSecureMemorySizeNonDefault() const {
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
}
size_t GetRequiredSecureMemorySize() const {
return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
}
size_t GetTotalSystemResourceSize() const {
return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->GetSize() : 0;
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->GetSize() : 0;
}
size_t GetUsedSystemResourceSize() const {
return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->GetUsedSize() : 0;
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->GetUsedSize() : 0;
}
void SetRunningThread(s32 core, KThread *thread, u64 idle_count, u64 switch_count) {

View file

@ -53,7 +53,7 @@ namespace ams::kern {
static size_t GetRealMemorySize();
static size_t GetIntendedMemorySize();
static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out, KPhysicalAddress kern_base_address);
static bool ShouldIncreaseThreadResourceLimit();
static void TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args);
static size_t GetApplicationPoolSize();

View file

@ -120,10 +120,6 @@ namespace ams::kern {
return m_address == rhs;
}
constexpr ALWAYS_INLINE bool operator!=(uintptr_t rhs) const {
return m_address != rhs;
}
/* Allow getting the address explicitly, for use in accessors. */
constexpr ALWAYS_INLINE uintptr_t GetValue() const {
return m_address;

View file

@ -25,7 +25,8 @@ namespace ams::kern {
static constexpr s32 ExitWorkerPriority = 11;
enum WorkerType {
WorkerType_Exit,
WorkerType_ExitThread,
WorkerType_ExitProcess,
WorkerType_Count,
};

View file

@ -164,8 +164,9 @@ namespace ams::kern {
};
template<typename Derived, typename Base, bool SupportDynamicExpansion = false> requires std::derived_from<Base, KAutoObjectWithList>
template<typename Derived, typename Base, bool SupportDynamicExpansion = false>
class KAutoObjectWithSlabHeapAndContainer : public KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion> {
static_assert(std::derived_from<Base, KAutoObjectWithList>);
private:
static constinit inline KAutoObjectWithListContainer<Derived> s_container;
public:

View file

@ -59,7 +59,9 @@ namespace ams::kern::arch::arm64 {
EsrEc_BrkInstruction = 0b111100,
};
constexpr u32 GetInstructionData(const KExceptionContext *context, u64 esr) {
u32 GetInstructionDataSupervisorMode(const KExceptionContext *context, u64 esr) {
/* Check for THUMB usermode */
if ((context->psr & 0x3F) == 0x30) {
u32 insn = *reinterpret_cast<u16 *>(context->pc & ~0x1);
@ -74,6 +76,37 @@ namespace ams::kern::arch::arm64 {
}
}
u32 GetInstructionDataUserMode(const KExceptionContext *context) {
/* Check for THUMB usermode */
u32 insn = 0;
if ((context->psr & 0x3F) == 0x30) {
u16 insn_high = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_high), reinterpret_cast<u16 *>(context->pc & ~0x1), sizeof(insn_high))) {
insn = insn_high;
/* Check if the instruction was a THUMB mode branch prefix. */
if (((insn >> 11) & 0b11110) == 0b11110) {
u16 insn_low = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_low), reinterpret_cast<u16 *>((context->pc & ~0x1) + sizeof(u16)), sizeof(insn_low))) {
insn = (static_cast<u32>(insn_high) << 16) | (static_cast<u32>(insn_low) << 0);
} else {
insn = 0;
}
}
} else {
insn = 0;
}
} else {
u32 insn_value = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc), sizeof(insn_value))) {
insn = insn_value;
} else {
insn = 0;
}
}
return insn;
}
void HandleUserException(KExceptionContext *context, u64 esr, u64 far, u64 afsr0, u64 afsr1, u32 data) {
KProcess &cur_process = GetCurrentProcess();
bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled();
@ -190,6 +223,13 @@ namespace ams::kern::arch::arm64 {
type = ams::svc::ExceptionType_InstructionAbort;
break;
case EsrEc_DataAbortEl0:
/* If esr.IFSC is "Alignment Fault", return UnalignedData instead of DataAbort. */
if ((esr & 0x3F) == 0b100001) {
type = ams::svc::ExceptionType_UnalignedData;
} else {
type = ams::svc::ExceptionType_DataAbort;
}
break;
default:
type = ams::svc::ExceptionType_DataAbort;
break;
@ -501,9 +541,10 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Retrieve information about the exception. */
const u64 esr = cpu::GetEsrEl1();
const u64 afsr0 = cpu::GetAfsr0El1();
const u64 afsr1 = cpu::GetAfsr1El1();
const bool is_user_mode = (context->psr & 0xF) == 0;
const u64 esr = cpu::GetEsrEl1();
const u64 afsr0 = cpu::GetAfsr0El1();
const u64 afsr1 = cpu::GetAfsr1El1();
u64 far = 0;
u32 data = 0;
@ -514,7 +555,12 @@ namespace ams::kern::arch::arm64 {
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
far = context->pc;
data = GetInstructionData(context, esr);
/* NOTE: Nintendo always calls GetInstructionDataUserMode. */
if (is_user_mode) {
data = GetInstructionDataUserMode(context);
} else {
data = GetInstructionDataSupervisorMode(context, esr);
}
break;
case EsrEc_Svc32:
if (context->psr & 0x20) {
@ -543,7 +589,6 @@ namespace ams::kern::arch::arm64 {
/* Verify that spsr's M is allowable (EL0t). */
{
const bool is_user_mode = (context->psr & 0xF) == 0;
if (is_user_mode) {
/* If the user disable count is set, we may need to pin the current thread. */
if (GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) {

View file

@ -365,7 +365,7 @@ namespace ams::kern::arch::arm64 {
value = 0;
}
/* Set the watchkpoint. */
/* Set the watchpoint. */
switch (name) {
case ams::svc::HardwareBreakPointRegisterName_D0: MESOSPHERE_SET_HW_WATCH_POINT( 0, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D1: MESOSPHERE_SET_HW_WATCH_POINT( 1, flags, value); break;

View file

@ -207,10 +207,7 @@ namespace ams::kern::arch::arm64 {
R_SUCCEED();
}
Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
/* The input ID isn't actually used. */
MESOSPHERE_UNUSED(id);
Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
/* Get an ASID */
m_asid = g_asid_manager.Reserve();
ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
@ -225,10 +222,10 @@ namespace ams::kern::arch::arm64 {
ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };
/* Initialize our base table. */
const size_t as_width = GetAddressSpaceWidth(as_type);
const size_t as_width = GetAddressSpaceWidth(flags);
const KProcessAddress as_start = 0;
const KProcessAddress as_end = (1ul << as_width);
R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));
R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));
/* Note that we've updated the table (since we created it). */
this->NoteUpdated();
@ -239,9 +236,15 @@ namespace ams::kern::arch::arm64 {
/* Only process tables should be finalized. */
MESOSPHERE_ASSERT(!this->IsKernel());
/* NOTE: Here Nintendo calls an unknown OnFinalize function. */
/* this->OnFinalize(); */
/* Note that we've updated (to ensure we're synchronized). */
this->NoteUpdated();
/* NOTE: Here Nintendo calls a second unknown OnFinalize function. */
/* this->OnFinalize2(); */
/* Free all pages in the table. */
{
/* Get implementation objects. */
@ -255,7 +258,7 @@ namespace ams::kern::arch::arm64 {
/* Begin the traversal. */
TraversalContext context;
TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
bool cur_valid = false;
TraversalEntry next_entry;
bool next_valid;
@ -265,7 +268,9 @@ namespace ams::kern::arch::arm64 {
/* Iterate over entries. */
while (true) {
if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
/* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
/* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size && next_entry.attr == (cur_entry.attr ? 1 : 0))) {
cur_entry.block_size += next_entry.block_size;
} else {
if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
@ -348,7 +353,7 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));
if (operation == OperationType_Map || operation == OperationType_MapFirst) {
if (operation == OperationType_Map) {
MESOSPHERE_ABORT_UNLESS(is_pa_valid);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
} else {
@ -375,8 +380,7 @@ namespace ams::kern::arch::arm64 {
switch (operation) {
case OperationType_Map:
case OperationType_MapFirst:
R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirst, page_list, reuse_ll));
R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
case OperationType_ChangePermissions:
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, false, page_list, reuse_ll));
case OperationType_ChangePermissionsAndRefresh:
@ -399,7 +403,8 @@ namespace ams::kern::arch::arm64 {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_MapGroup:
R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
case OperationType_MapFirstGroup:
R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirstGroup, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
@ -756,7 +761,7 @@ namespace ams::kern::arch::arm64 {
R_SUCCEED();
}
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Cache initial addresses for use on cleanup. */
@ -827,21 +832,17 @@ namespace ams::kern::arch::arm64 {
/* Open references to the pages, if we should. */
if (IsHeapPhysicalAddress(orig_phys_addr)) {
if (not_first) {
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
} else {
Kernel::GetMemoryManager().OpenFirst(orig_phys_addr, num_pages);
}
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
}
R_SUCCEED();
}
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* We want to maintain a new reference to every page in the group. */
KScopedPageGroup spg(pg);
KScopedPageGroup spg(pg, not_first);
/* Cache initial address for use on cleanup. */
const KProcessAddress orig_virt_addr = virt_addr;

View file

@ -46,12 +46,14 @@ namespace ams::kern::arch::arm64 {
out_entry->block_size = L3BlockSize;
}
out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
out_entry->attr = 0;
return true;
} else {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L3BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
return false;
}
}
@ -69,6 +71,7 @@ namespace ams::kern::arch::arm64 {
out_entry->block_size = L2BlockSize;
}
out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
out_entry->attr = 0;
/* Set the output context. */
out_context->l3_entry = nullptr;
@ -79,6 +82,8 @@ namespace ams::kern::arch::arm64 {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L2BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l3_entry = nullptr;
return false;
}
@ -108,6 +113,8 @@ namespace ams::kern::arch::arm64 {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l2_entry = nullptr;
out_context->l3_entry = nullptr;
return false;
@ -119,6 +126,7 @@ namespace ams::kern::arch::arm64 {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l1_entry = m_table + m_num_entries;
out_context->l2_entry = nullptr;
out_context->l3_entry = nullptr;
@ -220,6 +228,7 @@ namespace ams::kern::arch::arm64 {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
context->l1_entry = m_table + m_num_entries;
context->l2_entry = nullptr;
context->l3_entry = nullptr;

View file

@ -18,12 +18,8 @@
namespace ams::kern::arch::arm64 {
void KSupervisorPageTable::Initialize(s32 core_id) {
/* Get the identity mapping ttbr0. */
m_ttbr0_identity[core_id] = cpu::GetTtbr0El1();
/* Set sctlr_el1 */
cpu::SystemControlRegisterAccessor().SetWxn(true).Store();
cpu::EnsureInstructionConsistency();
/* Verify that sctlr_el1 has the wxn bit set. */
MESOSPHERE_ABORT_UNLESS(cpu::SystemControlRegisterAccessor().GetWxn());
/* Invalidate the entire TLB. */
cpu::InvalidateEntireTlb();

View file

@ -130,4 +130,4 @@ _ZN3ams4kern3svc14RestoreContextEm:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
eret
ERET_WITH_SPECULATION_BARRIER

View file

@ -68,7 +68,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
/* Check if our disable count allows us to call SVCs. */
mrs x10, tpidrro_el0
ldrh w10, [x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)]
add x10, x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)
ldtrh w10, [x10]
cbz w10, 1f
/* It might not, so check the stack params to see if we must not allow the SVC. */
@ -194,7 +195,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
eret
ERET_WITH_SPECULATION_BARRIER
5: /* Return from SVC. */
@ -297,7 +298,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
eret
ERET_WITH_SPECULATION_BARRIER
/* ams::kern::arch::arm64::SvcHandler32() */
.section .text._ZN3ams4kern4arch5arm6412SvcHandler32Ev, "ax", %progbits
@ -352,7 +353,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
/* Check if our disable count allows us to call SVCs. */
mrs x10, tpidrro_el0
ldrh w10, [x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)]
add x10, x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)
ldtrh w10, [x10]
cbz w10, 1f
/* It might not, so check the stack params to see if we must not allow the SVC. */
@ -467,7 +469,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
eret
ERET_WITH_SPECULATION_BARRIER
5: /* Return from SVC. */
@ -547,4 +549,4 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
eret
ERET_WITH_SPECULATION_BARRIER

View file

@ -142,10 +142,10 @@ namespace ams::kern::svc {
/* Get the target firmware. */
const auto target_fw = kern::GetTargetFirmware();
/* 10.0.0 broke the ABI for QueryIoMapping. */
/* 10.0.0 broke the ABI for QueryIoMapping, and renamed it to QueryMemoryMapping. */
if (target_fw < TargetFirmware_10_0_0) {
if (table_64) { ::ams::kern::svc::PatchSvcTableEntry(table_64, svc::SvcId_QueryIoMapping, LegacyQueryIoMapping::Call64); }
if (table_64_from_32) { ::ams::kern::svc::PatchSvcTableEntry(table_64_from_32, svc::SvcId_QueryIoMapping, LegacyQueryIoMapping::Call64From32); }
if (table_64) { ::ams::kern::svc::PatchSvcTableEntry(table_64, svc::SvcId_QueryMemoryMapping, LegacyQueryIoMapping::Call64); }
if (table_64_from_32) { ::ams::kern::svc::PatchSvcTableEntry(table_64_from_32, svc::SvcId_QueryMemoryMapping, LegacyQueryIoMapping::Call64From32); }
}
/* 6.0.0 broke the ABI for GetFutureThreadInfo, and renamed it to GetDebugFutureThreadInfo. */

View file

@ -1133,17 +1133,17 @@ namespace ams::kern::board::nintendo::nx {
size_t cur_size;
{
/* Get the current contiguous range. */
KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
KPageTableBase::MemoryRange contig_range;
R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + mapped_size, size - mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
/* Ensure we close the range when we're done. */
ON_SCOPE_EXIT { contig_range.Close(); };
/* Get the current size. */
cur_size = contig_range.size;
cur_size = contig_range.GetSize();
/* Map the device page. */
R_TRY(this->MapDevicePage(contig_range.address, contig_range.size, cur_addr, device_perm));
R_TRY(this->MapDevicePage(contig_range.GetAddress(), cur_size, cur_addr, device_perm));
}
/* Advance. */
@ -1288,7 +1288,7 @@ namespace ams::kern::board::nintendo::nx {
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* We need to traverse the ranges that make up our mapping, to make sure they're all good. Start by getting a contiguous range. */
KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
KPageTableBase::MemoryRange contig_range;
if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), process_address, size))) {
return false;
}
@ -1300,8 +1300,8 @@ namespace ams::kern::board::nintendo::nx {
/* Walk the directory. */
KProcessAddress cur_process_address = process_address;
size_t remaining_size = size;
KPhysicalAddress cur_phys_address = contig_range.address;
size_t remaining_in_range = contig_range.size;
KPhysicalAddress cur_phys_address = contig_range.GetAddress();
size_t remaining_in_range = contig_range.GetSize();
bool first = true;
u32 first_attr = 0;
while (remaining_size > 0) {
@ -1347,8 +1347,8 @@ namespace ams::kern::board::nintendo::nx {
}
range_open = true;
cur_phys_address = contig_range.address;
remaining_in_range = contig_range.size;
cur_phys_address = contig_range.GetAddress();
remaining_in_range = contig_range.GetSize();
}
/* Check that the physical address is expected. */
@ -1390,8 +1390,8 @@ namespace ams::kern::board::nintendo::nx {
}
range_open = true;
cur_phys_address = contig_range.address;
remaining_in_range = contig_range.size;
cur_phys_address = contig_range.GetAddress();
remaining_in_range = contig_range.GetSize();
}
/* Check that the physical address is expected, and there's enough in the range. */

View file

@ -32,7 +32,6 @@ namespace ams::kern::board::nintendo::nx {
class SavedSystemRegisters {
private:
u64 ttbr0_el1;
u64 tcr_el1;
u64 elr_el1;
u64 sp_el0;
u64 spsr_el1;
@ -92,7 +91,6 @@ namespace ams::kern::board::nintendo::nx {
void SavedSystemRegisters::Save() {
/* Save system registers. */
this->ttbr0_el1 = cpu::GetTtbr0El1();
this->tcr_el1 = cpu::GetTcrEl1();
this->tpidr_el0 = cpu::GetTpidrEl0();
this->elr_el1 = cpu::GetElrEl1();
this->sp_el0 = cpu::GetSpEl0();
@ -408,7 +406,6 @@ namespace ams::kern::board::nintendo::nx {
/* Restore system registers. */
cpu::SetTtbr0El1 (this->ttbr0_el1);
cpu::SetTcrEl1 (this->tcr_el1);
cpu::SetTpidrEl0 (this->tpidr_el0);
cpu::SetElrEl1 (this->elr_el1);
cpu::SetSpEl0 (this->sp_el0);
@ -515,24 +512,6 @@ namespace ams::kern::board::nintendo::nx {
/* Save the system registers for the current core. */
g_sleep_system_registers[core_id].Save();
/* Change the translation tables to use the kernel table. */
{
/* Get the current value of the translation control register. */
const u64 tcr = cpu::GetTcrEl1();
/* Disable translation table walks on tlb miss. */
cpu::TranslationControlRegisterAccessor(tcr).SetEpd0(true).Store();
cpu::EnsureInstructionConsistency();
/* Change the translation table base (ttbr0) to use the kernel table. */
cpu::SetTtbr0El1(Kernel::GetKernelPageTable().GetIdentityMapTtbr0(core_id));
cpu::EnsureInstructionConsistency();
/* Enable translation table walks on tlb miss. */
cpu::TranslationControlRegisterAccessor(tcr).SetEpd0(false).Store();
cpu::EnsureInstructionConsistency();
}
/* Invalidate the entire tlb. */
cpu::InvalidateEntireTlb();
@ -552,13 +531,14 @@ namespace ams::kern::board::nintendo::nx {
/* Setup the initial arguments. */
{
init_args->ttbr0 = cpu::GetTtbr0El1();
init_args->ttbr1 = cpu::GetTtbr1El1();
init_args->tcr = cpu::GetTcrEl1();
init_args->mair = cpu::GetMairEl1();
init_args->cpuactlr = cpu::GetCpuActlrEl1();
init_args->cpuectlr = cpu::GetCpuEctlrEl1();
init_args->sctlr = cpu::GetSctlrEl1();
/* Determine whether we're running on a cortex-a53 or a-57. */
cpu::MainIdRegisterAccessor midr_el1;
const auto implementer = midr_el1.GetImplementer();
const auto primary_part = midr_el1.GetPrimaryPartNumber();
const bool needs_cpu_ctlr = (implementer == cpu::MainIdRegisterAccessor::Implementer::ArmLimited) && (primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA57 || primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA53);
init_args->cpuactlr = needs_cpu_ctlr ? cpu::GetCpuActlrEl1() : 0;
init_args->cpuectlr = needs_cpu_ctlr ? cpu::GetCpuEctlrEl1() : 0;
init_args->sp = 0;
init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry);
init_args->argument = sleep_buffer;

View file

@ -296,7 +296,7 @@ namespace ams::kern::board::nintendo::nx {
/* TODO: Move this into a header for the MC in general. */
constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
u32 config_value;
MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
smc::init::ReadWriteRegister(std::addressof(config_value), MemoryControllerConfigurationRegister, 0, 0);
return static_cast<size_t>(config_value & 0x3FFF) << 20;
}
@ -387,7 +387,7 @@ namespace ams::kern::board::nintendo::nx {
}
void KSystemControl::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, false>(core_id, entrypoint, arg)) == 0);
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor>(core_id, entrypoint, arg)) == 0);
}
/* Randomness for Initialization. */
@ -601,8 +601,9 @@ namespace ams::kern::board::nintendo::nx {
if (g_call_smc_on_panic) {
/* If we should, instruct the secure monitor to display a panic screen. */
smc::Panic(0xF00);
smc::ShowError(0xF00);
}
AMS_INFINITE_LOOP();
}

View file

@ -43,7 +43,7 @@ namespace ams::kern::board::nintendo::nx::smc {
enum FunctionId : u32 {
FunctionId_GetConfig = 0xC3000004,
FunctionId_GenerateRandomBytes = 0xC3000005,
FunctionId_Panic = 0xC3000006,
FunctionId_ShowError = 0xC3000006,
FunctionId_ConfigureCarveout = 0xC3000007,
FunctionId_ReadWriteRegister = 0xC3000008,
@ -51,122 +51,187 @@ namespace ams::kern::board::nintendo::nx::smc {
FunctionId_SetConfig = 0xC3000409,
};
constexpr size_t GenerateRandomBytesSizeMax = sizeof(::ams::svc::lp64::SecureMonitorArguments) - sizeof(::ams::svc::lp64::SecureMonitorArguments{}.r[0]);
/* Global lock for generate random bytes. */
constinit KSpinLock g_generate_random_lock;
bool TryGetConfigImpl(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* If successful, copy the output. */
const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
if (AMS_LIKELY(success)) {
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.r[1 + i];
}
}
return success;
}
bool SetConfigImpl(ConfigItem config_item, u64 value) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ReadWriteRegisterImpl(u32 *out, u64 address, u32 mask, u32 value) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Unconditionally write the output. */
*out = static_cast<u32>(args.r[1]);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool GenerateRandomBytesImpl(void *dst, size_t size) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* If successful, copy the output. */
const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
if (AMS_LIKELY(success)) {
std::memcpy(dst, std::addressof(args.r[1]), size);
}
return success;
}
bool ConfigureCarveoutImpl(size_t which, uintptr_t address, size_t size) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ShowErrorImpl(u32 color) {
/* Create the arguments .*/
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ShowError, color } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User>(args->r);
}
}
/* SMC functionality needed for init. */
namespace init {
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.r[1 + i];
}
/* Ensure we successfully get the config. */
MESOSPHERE_INIT_ABORT_UNLESS(TryGetConfigImpl(out, num_qwords, config_item));
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Call SmcGenerateRandomBytes() */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
/* Check that the size is valid. */
MESOSPHERE_INIT_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Copy output. */
std::memcpy(dst, std::addressof(args.r[1]), size);
/* Ensure we successfully generate the random bytes. */
MESOSPHERE_INIT_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
}
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
*out = args.r[1];
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
void ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
/* Ensure we successfully access the register. */
MESOSPHERE_INIT_ABORT_UNLESS(ReadWriteRegisterImpl(out, address, mask, value));
}
}
bool TryGetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
/* Disable interrupts. */
KScopedInterruptDisable di;
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
if (AMS_UNLIKELY(static_cast<SmcResult>(args.r[0]) != SmcResult::Success)) {
return false;
}
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.r[1 + i];
}
return true;
/* Get the config. */
return TryGetConfigImpl(out, num_qwords, config_item);
}
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Ensure we successfully get the config. */
MESOSPHERE_ABORT_UNLESS(TryGetConfig(out, num_qwords, config_item));
}
bool SetConfig(ConfigItem config_item, u64 value) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
/* Disable interrupts. */
KScopedInterruptDisable di;
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
/* Set the config. */
return SetConfigImpl(config_item, value);
}
bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
/* Disable interrupts. */
KScopedInterruptDisable di;
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
*out = static_cast<u32>(args.r[1]);
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
/* Access the register. */
return ReadWriteRegisterImpl(out, address, mask, value);
}
void ConfigureCarveout(size_t which, uintptr_t address, size_t size) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
/* Disable interrupts. */
KScopedInterruptDisable di;
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Ensure that we successfully configure the carveout. */
MESOSPHERE_ABORT_UNLESS(ConfigureCarveoutImpl(which, address, size));
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Setup for call. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
/* Check that the size is valid. */
MESOSPHERE_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
/* Make call. */
{
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(g_generate_random_lock);
/* Disable interrupts. */
KScopedInterruptDisable di;
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
}
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Acquire the exclusive right to generate random bytes. */
KScopedSpinLock lk(g_generate_random_lock);
/* Copy output. */
std::memcpy(dst, std::addressof(args.r[1]), size);
/* Ensure we successfully generate the random bytes. */
MESOSPHERE_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
}
void NORETURN Panic(u32 color) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_Panic, color } };
void ShowError(u32 color) {
/* Disable interrupts. */
KScopedInterruptDisable di;
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
AMS_INFINITE_LOOP();
/* Ensure we successfully show the error. */
MESOSPHERE_ABORT_UNLESS(ShowErrorImpl(color));
}
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User, true>(args->r);
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Perform the call. */
CallSecureMonitorFromUserImpl(args);
}
}

View file

@ -111,7 +111,7 @@ namespace ams::kern::board::nintendo::nx::smc {
bool SetConfig(ConfigItem config_item, u64 value);
void NORETURN Panic(u32 color);
void ShowError(u32 color);
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
@ -119,7 +119,7 @@ namespace ams::kern::board::nintendo::nx::smc {
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
void GenerateRandomBytes(void *dst, size_t size);
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
void ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
}

View file

@ -21,10 +21,13 @@ namespace ams::kern::init::Elf {
void ApplyRelocations(uintptr_t base_address, const Dyn *dynamic) {
uintptr_t dyn_rel = 0;
uintptr_t dyn_rela = 0;
uintptr_t dyn_relr = 0;
uintptr_t rel_count = 0;
uintptr_t rela_count = 0;
uintptr_t relr_sz = 0;
uintptr_t rel_ent = 0;
uintptr_t rela_ent = 0;
uintptr_t relr_ent = 0;
/* Iterate over all tags, identifying important extents. */
for (const Dyn *cur_entry = dynamic; cur_entry->GetTag() != DT_NULL; cur_entry++) {
@ -35,43 +38,99 @@ namespace ams::kern::init::Elf {
case DT_RELA:
dyn_rela = base_address + cur_entry->GetPtr();
break;
case DT_RELR:
dyn_relr = base_address + cur_entry->GetPtr();
break;
case DT_RELENT:
rel_ent = cur_entry->GetValue();
break;
case DT_RELAENT:
rela_ent = cur_entry->GetValue();
break;
case DT_RELRENT:
relr_ent = cur_entry->GetValue();
break;
case DT_RELCOUNT:
rel_count = cur_entry->GetValue();
break;
case DT_RELACOUNT:
rela_count = cur_entry->GetValue();
break;
case DT_RELRSZ:
relr_sz = cur_entry->GetValue();
break;
}
}
/* Apply all Rel relocations */
for (size_t i = 0; i < rel_count; i++) {
const auto &rel = *reinterpret_cast<const Elf::Rel *>(dyn_rel + rel_ent * i);
if (rel_count > 0) {
/* Check that the rel relocations are applyable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_rel != 0);
MESOSPHERE_INIT_ABORT_UNLESS(rel_ent == sizeof(Elf::Rel));
/* Only allow architecture-specific relocations. */
while (rel.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
for (size_t i = 0; i < rel_count; ++i) {
const auto &rel = reinterpret_cast<const Elf::Rel *>(dyn_rel)[i];
/* Apply the relocation. */
Elf::Addr *target_address = reinterpret_cast<Elf::Addr *>(base_address + rel.GetOffset());
*target_address += base_address;
/* Only allow architecture-specific relocations. */
while (rel.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
/* Apply the relocation. */
Elf::Addr *target_address = reinterpret_cast<Elf::Addr *>(base_address + rel.GetOffset());
*target_address += base_address;
}
}
/* Apply all Rela relocations. */
for (size_t i = 0; i < rela_count; i++) {
const auto &rela = *reinterpret_cast<const Elf::Rela *>(dyn_rela + rela_ent * i);
if (rela_count > 0) {
/* Check that the rela relocations are applyable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_rela != 0);
MESOSPHERE_INIT_ABORT_UNLESS(rela_ent == sizeof(Elf::Rela));
/* Only allow architecture-specific relocations. */
while (rela.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
for (size_t i = 0; i < rela_count; ++i) {
const auto &rela = reinterpret_cast<const Elf::Rela *>(dyn_rela)[i];
/* Apply the relocation. */
Elf::Addr *target_address = reinterpret_cast<Elf::Addr *>(base_address + rela.GetOffset());
*target_address = base_address + rela.GetAddend();
/* Only allow architecture-specific relocations. */
while (rela.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
/* Apply the relocation. */
Elf::Addr *target_address = reinterpret_cast<Elf::Addr *>(base_address + rela.GetOffset());
*target_address = base_address + rela.GetAddend();
}
}
/* Apply all Relr relocations. */
if (relr_sz >= sizeof(Elf::Relr)) {
/* Check that the relr relocations are applyable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_relr != 0);
MESOSPHERE_INIT_ABORT_UNLESS(relr_ent == sizeof(Elf::Relr));
const size_t relr_count = relr_sz / sizeof(Elf::Relr);
Elf::Addr *where = nullptr;
for (size_t i = 0; i < relr_count; ++i) {
const auto &relr = reinterpret_cast<const Elf::Relr *>(dyn_relr)[i];
if (relr.IsLocation()) {
/* Update location. */
where = reinterpret_cast<Elf::Addr *>(base_address + relr.GetLocation());
/* Apply the relocation. */
*(where++) += base_address;
} else {
/* Get the bitmap. */
u64 bitmap = relr.GetBitmap();
/* Apply all relocations. */
while (bitmap != 0) {
const u64 next = util::CountTrailingZeros(bitmap);
bitmap &= ~(static_cast<u64>(1) << next);
where[next] += base_address;
}
/* Advance. */
where += BITSIZEOF(bitmap) - 1;
}
}
}
}

View file

@ -171,6 +171,9 @@ namespace ams::kern::init {
const KMemoryRegion &slab_region = KMemoryLayout::GetSlabRegion();
KVirtualAddress address = slab_region.GetAddress();
/* Clear the slab region. */
std::memset(GetVoidPointer(address), 0, slab_region.GetSize());
/* Initialize slab type array to be in sorted order. */
KSlabType slab_types[KSlabType_Count];
for (size_t i = 0; i < util::size(slab_types); i++) { slab_types[i] = static_cast<KSlabType>(i); }

View file

@ -27,6 +27,7 @@ namespace ams::kern {
constinit KPhysicalAddress g_initial_process_binary_phys_addr = Null<KPhysicalAddress>;
constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
constinit size_t g_initial_process_binary_size = 0;
constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
constinit size_t g_initial_process_secure_memory_size = 0;
constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
@ -135,7 +136,7 @@ namespace ams::kern {
{
/* Allocate the previously unreserved pages. */
KPageGroup unreserve_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
/* Add the previously reserved pages. */
if (src_pool == dst_pool && binary_pages != 0) {
@ -155,40 +156,14 @@ namespace ams::kern {
KPageGroup *process_pg = std::addressof(pg);
ON_SCOPE_EXIT { process_pg->Close(); };
/* Get the temporary region. */
const auto &temp_region = KMemoryLayout::GetTempRegion();
MESOSPHERE_ABORT_UNLESS(temp_region.GetEndAddress() != 0);
/* Map the process's memory into the temporary region. */
KProcessAddress temp_address = Null<KProcessAddress>;
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
/* Setup the new page group's memory, so that we can load the process. */
{
/* Copy the unaligned ending of the compressed binary. */
if (const size_t unaligned_size = binary_size - util::AlignDown(binary_size, PageSize); unaligned_size != 0) {
std::memcpy(GetVoidPointer(temp_address + process_size - unaligned_size), GetVoidPointer(data + binary_size - unaligned_size), unaligned_size);
}
/* Copy the aligned part of the compressed binary. */
if (const size_t aligned_size = util::AlignDown(binary_size, PageSize); aligned_size != 0 && src_pool == dst_pool) {
std::memmove(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(temp_address), aligned_size);
} else {
if (src_pool != dst_pool) {
std::memcpy(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(data), aligned_size);
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(data), aligned_size / PageSize);
}
}
/* Clear the first part of the memory. */
std::memset(GetVoidPointer(temp_address), 0, process_size - binary_size);
}
/* Load the process. */
MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params, temp_address + process_size - binary_size));
reader.Load(pg, data);
/* Unmap the temporary mapping. */
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel));
/* If necessary, close/release the aligned part of the data we just loaded. */
if (const size_t aligned_bin_size = util::AlignDown(binary_size, PageSize); aligned_bin_size != 0 && src_pool != dst_pool) {
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(data), aligned_bin_size / PageSize);
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_bin_size);
}
/* Create a KProcess object. */
new_process = KProcess::Create();
@ -198,7 +173,7 @@ namespace ams::kern {
/* If the pool is the same, we need to use the workaround page group. */
if (src_pool == dst_pool) {
/* Allocate a new, usable group for the process. */
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
/* Copy data from the working page group to the usable one. */
auto work_it = pg.begin();
@ -241,11 +216,6 @@ namespace ams::kern {
MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, *process_pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), dst_pool, reader.IsImmortal()));
}
/* Release the memory that was previously reserved. */
if (const size_t aligned_bin_size = util::AlignDown(binary_size, PageSize); aligned_bin_size != 0 && src_pool != dst_pool) {
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_bin_size);
}
/* Set the process's memory permissions. */
MESOSPHERE_R_ABORT_UNLESS(reader.SetMemoryPermissions(new_process->GetPageTable(), params));
@ -275,10 +245,11 @@ namespace ams::kern {
}
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr) {
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr == Null<KPhysicalAddress>);
g_initial_process_binary_phys_addr = phys_addr;
g_initial_process_binary_size = size;
}
KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
@ -287,6 +258,12 @@ namespace ams::kern {
return g_initial_process_binary_phys_addr;
}
size_t GetInitialProcessBinarySize() {
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>);
return g_initial_process_binary_size;
}
u64 GetInitialProcessIdMin() {
return g_initial_process_id_min;
}
@ -305,14 +282,17 @@ namespace ams::kern {
LoadInitialProcessBinaryHeader();
if (g_initial_process_binary_header.num_processes > 0) {
/* Reserve pages for the initial process binary from the system resource limit. */
const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size));
/* Ensure that we have a non-zero size. */
const size_t expected_size = g_initial_process_binary_size;
MESOSPHERE_INIT_ABORT_UNLESS(expected_size != 0);
/* The initial process binary is potentially over-allocated, so free any extra pages. */
if (total_size < InitialProcessBinarySizeMax) {
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(g_initial_process_binary_address + total_size), (InitialProcessBinarySizeMax - total_size) / PageSize);
}
/* Ensure that the size we need to reserve is as we expect it to be. */
const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
MESOSPHERE_ABORT_UNLESS(total_size == expected_size);
MESOSPHERE_ABORT_UNLESS(total_size <= InitialProcessBinarySizeMax);
/* Reserve pages for the initial process binary from the system resource limit. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size));
return total_size;
} else {

View file

@ -184,6 +184,11 @@ namespace ams::kern {
case RegionType::NoMapping:
break;
case RegionType::KernelTraceBuffer:
/* NOTE: This does not match official, but is used to make pre-processing hbl capabilities in userland unnecessary. */
/* If ktrace isn't enabled, allow ktrace to succeed without mapping anything. */
if constexpr (!ams::kern::IsKTraceEnabled) {
break;
}
case RegionType::OnMemoryBootImage:
case RegionType::DTB:
R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));

View file

@ -107,7 +107,6 @@ namespace ams::kern {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1;
} while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
}
/* Atomically update the peak session tracking. */
@ -182,7 +181,6 @@ namespace ams::kern {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1;
} while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
}
/* Atomically update the peak session tracking. */

View file

@ -118,12 +118,12 @@ namespace ams::kern {
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Read the memory. */
if (info.GetState() != KMemoryState_Io) {
if (info.GetSvcState() != ams::svc::MemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size));
} else {
/* The memory is IO memory. */
R_TRY(target_pt.ReadDebugIoMemory(GetVoidPointer(buffer), cur_address, cur_size));
R_TRY(target_pt.ReadDebugIoMemory(GetVoidPointer(buffer), cur_address, cur_size, info.GetState()));
}
/* Advance. */
@ -181,12 +181,12 @@ namespace ams::kern {
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Read the memory. */
if (info.GetState() != KMemoryState_Io) {
if (info.GetSvcState() != ams::svc::MemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.WriteDebugMemory(cur_address, GetVoidPointer(buffer), cur_size));
} else {
/* The memory is IO memory. */
R_TRY(target_pt.WriteDebugIoMemory(cur_address, GetVoidPointer(buffer), cur_size));
R_TRY(target_pt.WriteDebugIoMemory(cur_address, GetVoidPointer(buffer), cur_size, info.GetState()));
}
/* Advance. */

View file

@ -73,6 +73,98 @@ namespace ams::kern {
}
}
NOINLINE void LoadInitialProcessSegment(const KPageGroup &pg, size_t seg_offset, size_t seg_size, size_t binary_size, KVirtualAddress data, bool compressed) {
/* Save the original binary extents, for later use. */
const KPhysicalAddress binary_phys = KMemoryLayout::GetLinearPhysicalAddress(data);
/* Create a page group representing the segment. */
KPageGroup segment_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
MESOSPHERE_R_ABORT_UNLESS(pg.CopyRangeTo(segment_pg, seg_offset, util::AlignUp(seg_size, PageSize)));
/* Setup the new page group's memory so that we can load the segment. */
{
KVirtualAddress last_block = Null<KVirtualAddress>;
KVirtualAddress last_data = Null<KVirtualAddress>;
size_t last_copy_size = 0;
size_t last_clear_size = 0;
size_t remaining_copy_size = binary_size;
for (const auto &block : segment_pg) {
/* Get the current block extents. */
const auto block_addr = block.GetAddress();
const size_t block_size = block.GetSize();
if (remaining_copy_size > 0) {
/* Determine if we need to copy anything. */
const size_t cur_size = std::min<size_t>(block_size, remaining_copy_size);
/* NOTE: The first block may potentially overlap the binary we want to copy to. */
/* Consider e.g. the case where the overall compressed image has size 0x40000, seg_offset is 0x30000, and binary_size is > 0x20000. */
/* Suppose too that data points, say, 0x18000 into the compressed image. */
/* Suppose finally that we simply naively copy in order. */
/* The first iteration of this loop will perform an 0x10000 copy from image+0x18000 to image + 0x30000 (as there is no overlap). */
/* The second iteration will perform a copy from image+0x28000 to <allocated pages>. */
/* However, the first copy will have trashed the data in the second copy. */
/* Thus, we must copy the first block after-the-fact to avoid potentially trashing data in the overlap case. */
/* It is guaranteed by pre-condition that only the very first block can overlap with the physical binary, so we can simply memmove it at the end. */
if (last_block != Null<KVirtualAddress>) {
/* This is guaranteed by pre-condition, but for ease of debugging, check for no overlap. */
MESOSPHERE_ASSERT(!util::HasOverlap(GetInteger(binary_phys), binary_size, GetInteger(block_addr), cur_size));
MESOSPHERE_UNUSED(binary_phys);
/* We need to copy. */
std::memcpy(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr)), GetVoidPointer(data), cur_size);
/* If we need to, clear past where we're copying. */
if (cur_size != block_size) {
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr + cur_size)), 0, block_size - cur_size);
}
/* Advance. */
remaining_copy_size -= cur_size;
data += cur_size;
} else {
/* Save the first block, which may potentially overlap, so that we can copy it later. */
last_block = KMemoryLayout::GetLinearVirtualAddress(block_addr);
last_data = data;
last_copy_size = cur_size;
last_clear_size = block_size - cur_size;
/* Advance. */
remaining_copy_size -= cur_size;
data += cur_size;
}
} else {
/* We don't have data to copy, so we should just clear the pages. */
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr)), 0, block_size);
}
}
/* Handle a last block. */
if (last_copy_size != 0) {
if (last_block != last_data) {
std::memmove(GetVoidPointer(last_block), GetVoidPointer(last_data), last_copy_size);
}
if (last_clear_size != 0) {
std::memset(GetVoidPointer(last_block + last_copy_size), 0, last_clear_size);
}
}
}
/* If compressed, uncompress the data. */
if (compressed) {
/* Get the temporary region. */
const auto &temp_region = KMemoryLayout::GetTempRegion();
MESOSPHERE_ABORT_UNLESS(temp_region.GetEndAddress() != 0);
/* Map the process's memory into the temporary region. */
KProcessAddress temp_address = Null<KProcessAddress>;
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), segment_pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
ON_SCOPE_EXIT { MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, segment_pg, KMemoryState_Kernel)); };
/* Uncompress the data. */
BlzUncompress(GetVoidPointer(temp_address + binary_size));
}
}
}
Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const {
@ -112,12 +204,17 @@ namespace ams::kern {
const uintptr_t map_end = map_start + map_size;
MESOSPHERE_ABORT_UNLESS(start_address == 0);
/* Default fields in parameter to zero. */
*out = {};
/* Set fields in parameter. */
out->code_address = map_start + start_address;
out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
out->program_id = m_kip_header.GetProgramId();
out->version = m_kip_header.GetVersion();
out->flags = 0;
out->code_address = map_start + start_address;
out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
out->program_id = m_kip_header.GetProgramId();
out->version = m_kip_header.GetVersion();
out->flags = 0;
out->reslimit = ams::svc::InvalidHandle;
out->system_resource_num_pages = 0;
MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));
/* Copy name field. */
@ -146,42 +243,55 @@ namespace ams::kern {
R_SUCCEED();
}
Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const {
void KInitialProcessReader::Load(const KPageGroup &pg, KVirtualAddress data) const {
/* Prepare to layout the data. */
const KProcessAddress rx_address = address + m_kip_header.GetRxAddress();
const KProcessAddress ro_address = address + m_kip_header.GetRoAddress();
const KProcessAddress rw_address = address + m_kip_header.GetRwAddress();
const u8 *rx_binary = GetPointer<const u8>(src);
const u8 *ro_binary = rx_binary + m_kip_header.GetRxCompressedSize();
const u8 *rw_binary = ro_binary + m_kip_header.GetRoCompressedSize();
const KVirtualAddress rx_data = data;
const KVirtualAddress ro_data = rx_data + m_kip_header.GetRxCompressedSize();
const KVirtualAddress rw_data = ro_data + m_kip_header.GetRoCompressedSize();
const size_t rx_size = m_kip_header.GetRxSize();
const size_t ro_size = m_kip_header.GetRoSize();
const size_t rw_size = m_kip_header.GetRwSize();
/* Copy text. */
if (util::AlignUp(m_kip_header.GetRxSize(), PageSize)) {
std::memmove(GetVoidPointer(rx_address), rx_binary, m_kip_header.GetRxCompressedSize());
if (m_kip_header.IsRxCompressed()) {
BlzUncompress(GetVoidPointer(rx_address + m_kip_header.GetRxCompressedSize()));
/* If necessary, setup bss. */
if (const size_t bss_size = m_kip_header.GetBssSize(); bss_size > 0) {
/* Determine how many additional pages are needed for bss. */
const u64 rw_end = util::AlignUp<u64>(m_kip_header.GetRwAddress() + m_kip_header.GetRwSize(), PageSize);
const u64 bss_end = util::AlignUp<u64>(m_kip_header.GetBssAddress() + m_kip_header.GetBssSize(), PageSize);
if (rw_end != bss_end) {
/* Find the pages corresponding to bss. */
size_t cur_offset = 0;
size_t remaining_size = bss_end - rw_end;
size_t bss_offset = rw_end - m_kip_header.GetRxAddress();
for (auto it = pg.begin(); it != pg.end() && remaining_size > 0; ++it) {
/* Get the current size. */
const size_t cur_size = it->GetSize();
/* Determine if the offset is in range. */
const size_t rel_diff = bss_offset - cur_offset;
const bool is_before = cur_offset <= bss_offset;
cur_offset += cur_size;
if (is_before && bss_offset < cur_offset) {
/* It is, so clear the bss range. */
const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(it->GetAddress() + rel_diff)), 0, block_size);
/* Advance. */
cur_offset = bss_offset + block_size;
remaining_size -= block_size;
bss_offset += block_size;
}
}
}
}
/* Copy rodata. */
if (util::AlignUp(m_kip_header.GetRoSize(), PageSize)) {
std::memmove(GetVoidPointer(ro_address), ro_binary, m_kip_header.GetRoCompressedSize());
if (m_kip_header.IsRoCompressed()) {
BlzUncompress(GetVoidPointer(ro_address + m_kip_header.GetRoCompressedSize()));
}
}
/* Load .rwdata. */
LoadInitialProcessSegment(pg, m_kip_header.GetRwAddress() - m_kip_header.GetRxAddress(), rw_size, m_kip_header.GetRwCompressedSize(), rw_data, m_kip_header.IsRwCompressed());
/* Copy rwdata. */
if (util::AlignUp(m_kip_header.GetRwSize(), PageSize)) {
std::memmove(GetVoidPointer(rw_address), rw_binary, m_kip_header.GetRwCompressedSize());
if (m_kip_header.IsRwCompressed()) {
BlzUncompress(GetVoidPointer(rw_address + m_kip_header.GetRwCompressedSize()));
}
}
/* Load .rodata. */
LoadInitialProcessSegment(pg, m_kip_header.GetRoAddress() - m_kip_header.GetRxAddress(), ro_size, m_kip_header.GetRoCompressedSize(), ro_data, m_kip_header.IsRoCompressed());
MESOSPHERE_UNUSED(params);
R_SUCCEED();
/* Load .text. */
LoadInitialProcessSegment(pg, m_kip_header.GetRxAddress() - m_kip_header.GetRxAddress(), rx_size, m_kip_header.GetRxCompressedSize(), rx_data, m_kip_header.IsRxCompressed());
}
Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const {

View file

@ -21,7 +21,8 @@ namespace ams::kern {
constexpr const std::pair<KMemoryState, const char *> MemoryStateNames[] = {
{KMemoryState_Free , "----- Free -----"},
{KMemoryState_Io , "Io "},
{KMemoryState_IoMemory , "IoMemory "},
{KMemoryState_IoRegister , "IoRegister "},
{KMemoryState_Static , "Static "},
{KMemoryState_Code , "Code "},
{KMemoryState_CodeData , "CodeData "},
@ -222,7 +223,7 @@ namespace ams::kern {
}
/* Update block state. */
it->Update(state, perm, attr, cur_address == address, set_disable_attr, clear_disable_attr);
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
@ -232,7 +233,7 @@ namespace ams::kern {
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) {
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
@ -269,7 +270,7 @@ namespace ams::kern {
}
/* Update block state. */
it->Update(state, perm, attr, false, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
@ -335,6 +336,62 @@ namespace ams::kern {
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, u32 mask, u32 attr) {
    /* Set the attribute bits selected by mask to attr for every block overlapping */
    /* [address, address + num_pages * PageSize), splitting blocks at the range edges */
    /* as needed so that only pages inside the range are modified. Blocks whose masked */
    /* attribute already equals attr are left untouched. allocator supplies the */
    /* pre-reserved KMemoryBlocks used for splits. */

    /* Ensure for auditing that we never end up with an invalid tree. */
    KScopedMemoryBlockManagerAuditor auditor(this);
    MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));

    KProcessAddress cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();
        if ((it->GetAttribute() & mask) != attr) {
            /* If we need to, create a new block before and insert it. */
            /* (The current block starts before the range; split so that the leading */
            /* part keeps its old attribute and `it` advances to the in-range part.) */
            if (cur_info.GetAddress() != GetInteger(cur_address)) {
                KMemoryBlock *new_block = allocator->Allocate();

                it->Split(new_block, cur_address);
                it = m_memory_block_tree.insert(*new_block);
                it++;

                /* Refresh info: `it` now refers to the block beginning at cur_address. */
                cur_info = it->GetMemoryInfo();
                cur_address = cur_info.GetAddress();
            }

            /* If we need to, create a new block after and insert it. */
            /* (The current block extends past the end of the range; split so the */
            /* trailing part keeps its old attribute.) */
            if (cur_info.GetSize() > remaining_size) {
                KMemoryBlock *new_block = allocator->Allocate();

                it->Split(new_block, cur_address + remaining_size);
                it = m_memory_block_tree.insert(*new_block);

                cur_info = it->GetMemoryInfo();
            }

            /* Update block state. */
            it->UpdateAttribute(mask, attr);
            cur_address += cur_info.GetSize();
            remaining_pages -= cur_info.GetNumPages();
        } else {
            /* If we already have the right attributes, just advance. */
            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
                /* The range ends inside this block; we are done. */
                remaining_pages = 0;
                cur_address += remaining_size;
            } else {
                /* The range continues past this block; consume up to its end. */
                remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
                cur_address = cur_info.GetEndAddress();
            }
        }
        it++;
    }

    /* Re-merge any now-identical adjacent blocks across the updated range. */
    this->CoalesceForUpdate(allocator, address, num_pages);
}
/* Debug. */
bool KMemoryBlockManager::CheckState() const {
/* If we fail, we should dump blocks. */

View file

@ -108,7 +108,8 @@ namespace ams::kern {
/* Free each region to its corresponding heap. */
size_t reserved_sizes[MaxManagerCount] = {};
const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
const size_t ini_size = GetInitialProcessBinarySize();
const KPhysicalAddress ini_end = ini_start + ini_size;
const KPhysicalAddress ini_last = ini_end - 1;
for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@ -126,13 +127,13 @@ namespace ams::kern {
}
/* Open/reserve the ini memory. */
manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
manager.OpenFirst(ini_start, ini_size / PageSize);
reserved_sizes[it.GetAttributes()] += ini_size;
/* Free memory after the ini to the heap. */
if (ini_last != cur_last) {
MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
manager.Free(ini_end, cur_end - ini_end);
manager.Free(ini_end, (cur_end - ini_end) / PageSize);
}
} else {
/* Ensure there's no partial overlap with the ini image. */
@ -224,7 +225,7 @@ namespace ams::kern {
return allocated_block;
}
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random) {
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
/* Choose a heap based on our page size request. */
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
@ -240,7 +241,7 @@ namespace ams::kern {
};
/* Keep allocating until we've allocated all our pages. */
for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
for (s32 index = heap_index; index >= min_heap_index && num_pages > 0; index--) {
const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
while (num_pages >= pages_per_alloc) {
@ -273,7 +274,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option) {
MESOSPHERE_ASSERT(out != nullptr);
MESOSPHERE_ASSERT(out->GetNumPages() == 0);
@ -284,8 +285,11 @@ namespace ams::kern {
const auto [pool, dir] = DecodeOption(option);
KScopedLightLock lk(m_pool_locks[pool]);
/* Choose a heap based on our alignment size request. */
const s32 heap_index = KPageHeap::GetAlignedBlockIndex(align_pages, align_pages);
/* Allocate the page group. */
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true));
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true, heap_index));
/* Open the first reference to the pages. */
for (const auto &block : *out) {
@ -325,8 +329,11 @@ namespace ams::kern {
const bool has_optimized = m_has_optimized_process[pool];
const bool is_optimized = m_optimized_process_ids[pool] == process_id;
/* Always use the minimum alignment size. */
const s32 heap_index = 0;
/* Allocate the page group. */
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false));
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false, heap_index));
/* Set whether we should optimize. */
optimized = has_optimized && is_optimized;

View file

@ -84,6 +84,58 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageGroup::CopyRangeTo(KPageGroup &out, size_t range_offset, size_t range_size) const {
    /* Append to `out` the physical blocks covering the sub-range */
    /* [range_offset, range_offset + range_size) of this group's pages. On failure, */
    /* everything appended here is rolled back so `out` is left exactly as it was. */
    /* Aborts if the requested range is not fully contained within this group. */
    /* NOTE(review): range_offset/range_size are presumably page-aligned byte */
    /* quantities (block_size / PageSize below assumes this) -- confirm at callers. */

    /* Get the previous last block for the group. */
    /* (Saved so the failure handler can truncate `out` back to this point.) */
    KBlockInfo * const out_last = out.m_last_block;
    const auto out_last_addr = out_last != nullptr ? out_last->GetAddress() : Null<KPhysicalAddress>;
    const auto out_last_np   = out_last != nullptr ? out_last->GetNumPages() : 0;

    /* Ensure we cleanup the group on failure. */
    ON_RESULT_FAILURE {
        /* Free every block appended after the saved tail (or from the head, if */
        /* `out` was empty on entry). */
        KBlockInfo *cur = out_last != nullptr ? out_last->GetNext() : out.m_first_block;
        while (cur != nullptr) {
            KBlockInfo *next = cur->GetNext();
            out.m_manager->Free(cur);
            cur = next;
        }

        if (out_last != nullptr) {
            /* Restore the old tail block, which AddBlock may have extended in place. */
            out_last->Initialize(out_last_addr, out_last_np);
            out_last->SetNext(nullptr);
        } else {
            out.m_first_block = nullptr;
        }

        out.m_last_block = out_last;
    };

    /* Find the pages within the requested range. */
    size_t cur_offset = 0, remaining_size = range_size;
    for (auto it = this->begin(); it != this->end() && remaining_size > 0; ++it) {
        /* Get the current size. */
        const size_t cur_size = it->GetSize();

        /* Determine if the offset is in range. */
        /* rel_diff is the offset of range_offset within the current block; it is only */
        /* meaningful when is_before holds (otherwise the subtraction wrapped). */
        const size_t rel_diff  = range_offset - cur_offset;
        const bool   is_before = cur_offset <= range_offset;
        cur_offset += cur_size;
        if (is_before && range_offset < cur_offset) {
            /* It is, so add the block. */
            /* Copy from rel_diff to the end of the block, capped by what remains. */
            const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
            R_TRY(out.AddBlock(it->GetAddress() + rel_diff, block_size / PageSize));

            /* Advance. */
            cur_offset      = range_offset + block_size;
            remaining_size -= block_size;
            range_offset   += block_size;
        }
    }

    /* Check that we successfully copied the range. */
    MESOSPHERE_ABORT_UNLESS(remaining_size == 0);

    R_SUCCEED();
}
void KPageGroup::Open() const {
auto &mm = Kernel::GetMemoryManager();

File diff suppressed because it is too large Load diff

View file

@ -51,7 +51,7 @@ namespace ams::kern {
}
}
/* Wait for all children threads to terminate.*/
/* Wait for all children threads to terminate. */
while (true) {
/* Get the next child. */
KThread *cur_child = nullptr;
@ -263,28 +263,45 @@ namespace ams::kern {
MESOSPHERE_ASSERT(res_limit != nullptr);
MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast<size_t>(params.code_num_pages));
/* Determine is application. */
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0;
/* Set members. */
m_memory_pool = pool;
m_resource_limit = res_limit;
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_immortal = immortal;
m_memory_pool = pool;
m_resource_limit = res_limit;
m_is_default_application_system_resource = false;
m_is_immortal = immortal;
/* Open reference to our system resource. */
m_system_resource->Open();
/* Setup our system resource. */
if (const size_t system_resource_num_pages = params.system_resource_num_pages; system_resource_num_pages != 0) {
/* Create a secure system resource. */
KSecureSystemResource *secure_resource = KSecureSystemResource::Create();
R_UNLESS(secure_resource != nullptr, svc::ResultOutOfResource());
ON_RESULT_FAILURE { secure_resource->Close(); };
/* Initialize the secure resource. */
R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, m_resource_limit, m_memory_pool));
/* Set our system resource. */
m_system_resource = secure_resource;
} else {
/* Use the system-wide system resource. */
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_default_application_system_resource = is_app;
/* Open reference to the system resource. */
m_system_resource->Open();
}
/* Ensure we clean up our secure resource, if we fail. */
ON_RESULT_FAILURE { m_system_resource->Close(); };
/* Setup page table. */
/* NOTE: Nintendo passes process ID despite not having set it yet. */
/* This goes completely unused, but even so... */
{
const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
}
ON_RESULT_FAILURE { m_page_table.Finalize(); };
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
/* Ensure we can insert the code region. */
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
@ -315,9 +332,10 @@ namespace ams::kern {
MESOSPHERE_ASSERT(res_limit != nullptr);
/* Set pool and resource limit. */
m_memory_pool = pool;
m_resource_limit = res_limit;
m_is_immortal = false;
m_memory_pool = pool;
m_resource_limit = res_limit;
m_is_default_application_system_resource = false;
m_is_immortal = false;
/* Get the memory sizes. */
const size_t code_num_pages = params.code_num_pages;
@ -348,6 +366,8 @@ namespace ams::kern {
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_default_application_system_resource = is_app;
/* Open reference to the system resource. */
m_system_resource->Open();
}
@ -356,13 +376,9 @@ namespace ams::kern {
ON_RESULT_FAILURE { m_system_resource->Close(); };
/* Setup page table. */
/* NOTE: Nintendo passes process ID despite not having set it yet. */
/* This goes completely unused, but even so... */
{
const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, m_system_resource, res_limit));
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
@ -447,7 +463,7 @@ namespace ams::kern {
void KProcess::Exit() {
MESOSPHERE_ASSERT_THIS();
/* Determine whether we need to start terminating */
/* Determine whether we need to start terminating. */
bool needs_terminate = false;
{
KScopedLightLock lk(m_state_lock);
@ -471,7 +487,7 @@ namespace ams::kern {
MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", m_process_id, m_name);
/* Register the process as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
}
/* Exit the current thread. */
@ -516,7 +532,7 @@ namespace ams::kern {
MESOSPHERE_LOG("KProcess::Terminate() FAIL pid=%ld name=%-12s\n", m_process_id, m_name);
/* Register the process as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
}
}
@ -848,7 +864,7 @@ namespace ams::kern {
size_t KProcess::GetUsedUserPhysicalMemorySize() const {
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();
return norm_size + other_size + sec_size;
}
@ -856,13 +872,20 @@ namespace ams::kern {
size_t KProcess::GetTotalUserPhysicalMemorySize() const {
/* Get the amount of free and used size. */
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
const size_t used_size = this->GetUsedUserPhysicalMemorySize();
const size_t max_size = m_max_process_memory;
/* Determine used size. */
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySize();
const size_t used_size = norm_size + other_size + sec_size;
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
if (used_size + free_size > max_size) {
return max_size;
} else {
return free_size + used_size;
return free_size + this->GetUsedUserPhysicalMemorySize();
}
}
@ -876,14 +899,20 @@ namespace ams::kern {
size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
/* Get the amount of free and used size. */
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
const size_t used_size = this->GetUsedUserPhysicalMemorySize();
const size_t sec_size = m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
const size_t max_size = m_max_process_memory;
/* Determine used size. */
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySize();
const size_t used_size = norm_size + other_size + sec_size;
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
if (used_size + free_size > max_size) {
return max_size - sec_size;
return max_size - this->GetRequiredSecureMemorySizeNonDefault();
} else {
return free_size + used_size - sec_size;
return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
}
}

View file

@ -36,7 +36,7 @@ namespace ams::kern {
/* Cleanup the session list. */
while (true) {
/* Get the last session in the list */
/* Get the last session in the list. */
KServerSession *session = nullptr;
{
KScopedSchedulerLock sl;
@ -56,7 +56,7 @@ namespace ams::kern {
/* Cleanup the light session list. */
while (true) {
/* Get the last session in the list */
/* Get the last session in the list. */
KLightServerSession *session = nullptr;
{
KScopedSchedulerLock sl;

View file

@ -650,7 +650,7 @@ namespace ams::kern {
const auto src_state = src_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
/* Determine the source permission. User buffer should be unmapped + read, TLS should be user readable. */
const KMemoryPermission src_perm = static_cast<KMemoryPermission>(src_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelRead : KMemoryPermission_UserRead);
const KMemoryPermission src_perm = static_cast<KMemoryPermission>(src_user ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelRead) : KMemoryPermission_UserRead);
/* Perform the fast part of the copy. */
R_TRY(src_page_table.CopyMemoryFromLinearToKernel(reinterpret_cast<uintptr_t>(dst_msg_ptr) + offset_words, fast_size, src_message_buffer + offset_words,
@ -753,7 +753,7 @@ namespace ams::kern {
/* Perform the pointer data copy. */
const bool dst_heap = dst_user && dst_recv_list.IsToMessageBuffer();
const auto dst_state = dst_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_heap ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_heap ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite) : KMemoryPermission_UserReadWrite);
R_TRY(dst_page_table.CopyMemoryFromUserToLinear(recv_pointer, recv_size,
dst_state, dst_state,
dst_perm,
@ -911,7 +911,7 @@ namespace ams::kern {
const auto dst_state = dst_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
/* Determine the dst permission. User buffer should be unmapped + read, TLS should be user readable. */
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite) : KMemoryPermission_UserReadWrite);
/* Perform the fast part of the copy. */
R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(dst_message_buffer + offset_words, fast_size,

View file

@ -37,7 +37,7 @@ namespace ams::kern {
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate the memory. */
R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, owner->GetAllocateOption()));
R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, 1, owner->GetAllocateOption()));
/* Commit our reservation. */
memory_reservation.Commit();

View file

@ -39,17 +39,18 @@ namespace ams::kern {
KPhysicalAddress KSystemControlBase::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
if (intended_dram_size * 2 < real_dram_size) {
if (intended_dram_size * 2 <= real_dram_size) {
return base_address;
} else {
return base_address + ((real_dram_size - intended_dram_size) / 2);
}
}
void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out, KPhysicalAddress kern_base_address) {
*out = {
.address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - InitialProcessBinarySizeMax,
._08 = 0,
.address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - InitialProcessBinarySizeMax,
._08 = 0,
.kern_address = GetInteger(kern_base_address),
};
}
@ -77,7 +78,7 @@ namespace ams::kern {
void KSystemControlBase::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
#if defined(ATMOSPHERE_ARCH_ARM64)
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0>(core_id, entrypoint, arg)) == 0);
#else
AMS_INFINITE_LOOP();
#endif

View file

@ -476,10 +476,6 @@ namespace ams::kern {
m_parent->ClearRunningThread(this);
}
/* Signal. */
m_signaled = true;
KSynchronizationObject::NotifyAvailable();
/* Call the on thread termination handler. */
KThreadContext::OnThreadTerminating(this);
@ -507,6 +503,13 @@ namespace ams::kern {
cpu::SynchronizeCores(m_parent->GetPhysicalCoreMask());
}
/* Acquire the scheduler lock. */
KScopedSchedulerLock sl;
/* Signal. */
m_signaled = true;
KSynchronizationObject::NotifyAvailable();
/* Close the thread. */
this->Close();
}
@ -1328,7 +1331,7 @@ namespace ams::kern {
this->StartTermination();
/* Register the thread as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitThread, this);
}
MESOSPHERE_PANIC("KThread::Exit() would return");

View file

@ -115,8 +115,9 @@ namespace ams::kern {
/* Perform more core-0 specific initialization. */
if (core_id == 0) {
/* Initialize the exit worker manager, so that threads and processes may exit cleanly. */
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_Exit).Initialize(KWorkerTaskManager::ExitWorkerPriority);
/* Initialize the exit worker managers, so that threads and processes may exit cleanly. */
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_ExitThread).Initialize(KWorkerTaskManager::ExitWorkerPriority);
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_ExitProcess).Initialize(KWorkerTaskManager::ExitWorkerPriority);
/* Setup so that we may sleep later, and reserve memory for secure applets. */
KSystemControl::InitializePhase2();

View file

@ -37,7 +37,7 @@ namespace ams::kern::svc {
R_SUCCEED();
}
Result QueryIoMapping(uintptr_t *out_address, size_t *out_size, uint64_t phys_addr, size_t size) {
Result QueryMemoryMapping(uintptr_t *out_address, size_t *out_size, uint64_t phys_addr, size_t size) {
/* Declare variables we'll populate. */
KProcessAddress found_address = Null<KProcessAddress>;
size_t found_size = 0;
@ -125,15 +125,15 @@ namespace ams::kern::svc {
R_RETURN(QueryPhysicalAddress(out_info, address));
}
Result QueryIoMapping64(ams::svc::Address *out_address, ams::svc::Size *out_size, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
Result QueryMemoryMapping64(ams::svc::Address *out_address, ams::svc::Size *out_size, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
static_assert(sizeof(*out_size) == sizeof(size_t));
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size));
R_RETURN(QueryMemoryMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size));
}
Result LegacyQueryIoMapping64(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size));
R_RETURN(QueryMemoryMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size));
}
/* ============================= 64From32 ABI ============================= */
@ -150,15 +150,15 @@ namespace ams::kern::svc {
R_SUCCEED();
}
Result QueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::Size *out_size, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
Result QueryMemoryMapping64From32(ams::svc::Address *out_address, ams::svc::Size *out_size, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
static_assert(sizeof(*out_size) == sizeof(size_t));
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size));
R_RETURN(QueryMemoryMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size));
}
Result LegacyQueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size));
R_RETURN(QueryMemoryMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size));
}
}

View file

@ -36,15 +36,15 @@ namespace ams::kern::svc {
size_t remaining = size;
while (remaining > 0) {
/* Get a contiguous range to operate on. */
KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
KPageTableBase::MemoryRange contig_range;
R_TRY(page_table.OpenMemoryRangeForProcessCacheOperation(std::addressof(contig_range), cur_address, aligned_end - cur_address));
/* Close the range when we're done operating on it. */
ON_SCOPE_EXIT { contig_range.Close(); };
/* Adjust to remain within range. */
KVirtualAddress operate_address = KMemoryLayout::GetLinearVirtualAddress(contig_range.address);
size_t operate_size = contig_range.size;
KVirtualAddress operate_address = KMemoryLayout::GetLinearVirtualAddress(contig_range.GetAddress());
size_t operate_size = contig_range.GetSize();
if (cur_address < address) {
operate_address += (address - cur_address);
operate_size -= (address - cur_address);
@ -57,7 +57,7 @@ namespace ams::kern::svc {
operation.Operate(GetVoidPointer(operate_address), operate_size);
/* Advance. */
cur_address += contig_range.size;
cur_address += contig_range.GetSize();
remaining -= operate_size;
}
MESOSPHERE_ASSERT(remaining == 0);
@ -102,7 +102,11 @@ namespace ams::kern::svc {
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Invalidate the cache. */
R_TRY(process->GetPageTable().InvalidateProcessDataCache(address, size));
if (process.GetPointerUnsafe() == GetCurrentProcessPointer()) {
R_TRY(process->GetPageTable().InvalidateCurrentProcessDataCache(address, size));
} else {
R_TRY(process->GetPageTable().InvalidateProcessDataCache(address, size));
}
R_SUCCEED();
}

View file

@ -106,6 +106,9 @@ namespace ams::kern::svc {
*out = 0;
}
break;
case ams::svc::InfoType_AliasRegionExtraSize:
*out = process->GetPageTable().GetAliasRegionExtraSize();
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
@ -134,6 +137,7 @@ namespace ams::kern::svc {
case ams::svc::InfoType_UsedNonSystemMemorySize:
case ams::svc::InfoType_IsApplication:
case ams::svc::InfoType_FreeThreadCount:
case ams::svc::InfoType_AliasRegionExtraSize:
{
/* These info types don't support non-zero subtypes. */
R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination());

View file

@ -21,7 +21,7 @@ namespace ams::kern::svc {
namespace {
Result MapInsecureMemory(uintptr_t address, size_t size) {
Result MapInsecurePhysicalMemory(uintptr_t address, size_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
@ -33,10 +33,10 @@ namespace ams::kern::svc {
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Insecure), svc::ResultInvalidMemoryRegion());
/* Map the insecure memory. */
R_RETURN(pt.MapInsecureMemory(address, size));
R_RETURN(pt.MapInsecurePhysicalMemory(address, size));
}
Result UnmapInsecureMemory(uintptr_t address, size_t size) {
Result UnmapInsecurePhysicalMemory(uintptr_t address, size_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
@ -48,29 +48,29 @@ namespace ams::kern::svc {
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Insecure), svc::ResultInvalidMemoryRegion());
/* Map the insecure memory. */
R_RETURN(pt.UnmapInsecureMemory(address, size));
R_RETURN(pt.UnmapInsecurePhysicalMemory(address, size));
}
}
/* ============================= 64 ABI ============================= */
Result MapInsecureMemory64(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(MapInsecureMemory(address, size));
Result MapInsecurePhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(MapInsecurePhysicalMemory(address, size));
}
Result UnmapInsecureMemory64(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(UnmapInsecureMemory(address, size));
Result UnmapInsecurePhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(UnmapInsecurePhysicalMemory(address, size));
}
/* ============================= 64From32 ABI ============================= */
Result MapInsecureMemory64From32(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(MapInsecureMemory(address, size));
Result MapInsecurePhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(MapInsecurePhysicalMemory(address, size));
}
Result UnmapInsecureMemory64From32(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(UnmapInsecureMemory(address, size));
Result UnmapInsecurePhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
R_RETURN(UnmapInsecurePhysicalMemory(address, size));
}
}

View file

@ -127,7 +127,7 @@ namespace ams::kern::svc {
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Verify that the mapping is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Io), svc::ResultInvalidMemoryRegion());
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, ams::svc::MemoryState_Io), svc::ResultInvalidMemoryRegion());
/* Validate the map permission. */
R_UNLESS(IsValidIoRegionPermission(map_perm), svc::ResultInvalidNewMemoryPermission());
@ -156,7 +156,7 @@ namespace ams::kern::svc {
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Verify that the mapping is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Io), svc::ResultInvalidMemoryRegion());
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, ams::svc::MemoryState_Io), svc::ResultInvalidMemoryRegion());
/* Get the io region. */
KScopedAutoObject io_region = GetCurrentProcess().GetHandleTable().GetObject<KIoRegion>(io_region_handle);

View file

@ -143,7 +143,7 @@ namespace ams::kern::svc {
/* Get the process page table. */
auto &page_table = GetCurrentProcess().GetPageTable();
/* Lock the mesage buffer. */
/* Lock the message buffer. */
R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));
{
@ -186,7 +186,7 @@ namespace ams::kern::svc {
/* Commit our reservation. */
event_reservation.Commit();
/* At end of scope, kill the standing references to the sub events. */
/* At end of scope, kill the standing event references. */
ON_SCOPE_EXIT {
event->GetReadableEvent().Close();
event->Close();
@ -215,7 +215,7 @@ namespace ams::kern::svc {
/* Get the process page table. */
auto &page_table = GetCurrentProcess().GetPageTable();
/* Lock the mesage buffer. */
/* Lock the message buffer. */
R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));
/* Ensure that if we fail and aren't terminating that we unlock the user buffer. */
@ -242,7 +242,7 @@ namespace ams::kern::svc {
/* Get the process page table. */
auto &page_table = GetCurrentProcess().GetPageTable();
/* Lock the mesage buffer, getting its physical address. */
/* Lock the message buffer, getting its physical address. */
KPhysicalAddress message_paddr;
R_TRY(page_table.LockForIpcUserBuffer(std::addressof(message_paddr), message, buffer_size));

View file

@ -58,10 +58,13 @@ namespace ams::kern::svc {
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Validate the attribute and mask. */
constexpr u32 SupportedMask = ams::svc::MemoryAttribute_Uncached;
constexpr u32 SupportedMask = ams::svc::MemoryAttribute_Uncached | ams::svc::MemoryAttribute_PermissionLocked;
R_UNLESS((mask | attr) == mask, svc::ResultInvalidCombination());
R_UNLESS((mask | attr | SupportedMask) == SupportedMask, svc::ResultInvalidCombination());
/* Check that permission locked is either being set or not masked. */
R_UNLESS((mask & ams::svc::MemoryAttribute_PermissionLocked) == (attr & ams::svc::MemoryAttribute_PermissionLocked), svc::ResultInvalidCombination());
/* Validate that the region is in range for the current process. */
auto &page_table = GetCurrentProcess().GetPageTable();
R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory());

View file

@ -27,7 +27,7 @@ namespace ams::kern::svc {
R_UNLESS(size < ams::kern::MainMemorySizeMax, svc::ResultInvalidSize());
/* Set the heap size. */
KProcessAddress address;
KProcessAddress address = Null<KProcessAddress>;
R_TRY(GetCurrentProcess().GetPageTable().SetHeapSize(std::addressof(address), size));
/* Set the output. */

Some files were not shown because too many files have changed in this diff Show more