mirror of
https://github.com/Atmosphere-NX/Atmosphere
synced 2024-11-09 22:56:35 +00:00
Merge pull request #821 from Atmosphere-NX/mesosphere-dev
Mesosphere: Implement kernel initialization.
This commit is contained in:
commit
fa4a96d021
266 changed files with 31038 additions and 772 deletions
|
@ -1,4 +1,4 @@
|
|||
export ATMOSPHERE_DEFINES += -DATMOSPHERE_BOARD_NINTENDO_SWITCH -D__SWITCH__
|
||||
export ATMOSPHERE_DEFINES += -DATMOSPHERE_BOARD_NINTENDO_NX -D__SWITCH__
|
||||
export ATMOSPHERE_SETTINGS +=
|
||||
export ATMOSPHERE_CFLAGS +=
|
||||
export ATMOSPHERE_CXXFLAGS +=
|
|
@ -24,11 +24,11 @@ export ATMOSPHERE_ASFLAGS :=
|
|||
|
||||
ifeq ($(ATMOSPHERE_BOARD),nx-hac-001)
|
||||
export ATMOSPHERE_ARCH_DIR := arch/arm64
|
||||
export ATMOSPHERE_BOARD_DIR := board/nintendo/switch
|
||||
export ATMOSPHERE_BOARD_DIR := board/nintendo/nx
|
||||
export ATMOSPHERE_OS_DIR := os/horizon
|
||||
|
||||
export ATMOSPHERE_ARCH_NAME := arm64
|
||||
export ATMOSPHERE_BOARD_NAME := nintendo_switch
|
||||
export ATMOSPHERE_BOARD_NAME := nintendo_nx
|
||||
export ATMOSPHERE_OS_NAME := horizon
|
||||
endif
|
||||
|
||||
|
@ -76,14 +76,22 @@ TARGET := $(notdir $(CURDIR))
|
|||
BUILD := build
|
||||
DATA := data
|
||||
INCLUDES := include
|
||||
SOURCES ?= $(foreach d,$(filter-out source/arch source/board,$(wildcard source)),$(call DIR_WILDCARD,$d) $d)
|
||||
SOURCES ?= source $(foreach d,$(filter-out source/arch source/board source,$(wildcard source/*)),$(if $(wildcard $d/.),$(call DIR_WILDCARD,$d) $d,))
|
||||
|
||||
ifneq ($(strip $(wildcard source/$(ATMOSPHERE_ARCH_DIR)/.*)),)
|
||||
SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR))
|
||||
SOURCES += source/$(ATMOSPHERE_ARCH_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR))
|
||||
endif
|
||||
ifneq ($(strip $(wildcard source/$(ATMOSPHERE_BOARD_DIR)/.*)),)
|
||||
SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR))
|
||||
SOURCES += source/$(ATMOSPHERE_BOARD_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR))
|
||||
endif
|
||||
ifneq ($(strip $(wildcard source/$(ATMOSPHERE_OS_DIR)/.*)),)
|
||||
SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR))
|
||||
SOURCES += source/$(ATMOSPHERE_OS_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR))
|
||||
endif
|
||||
|
||||
#---------------------------------------------------------------------------------
|
||||
# Rules for compiling pre-compiled headers
|
||||
#---------------------------------------------------------------------------------
|
||||
%.gch: %.hpp
|
||||
@echo $<
|
||||
$(CXX) -w -x c++-header -MMD -MP -MF $(DEPSDIR)/$*.d $(CXXFLAGS) -c $< -o $@ $(ERROR_FILTER)
|
||||
@cp $@ $(<).gch
|
||||
|
|
|
@ -7,12 +7,12 @@ include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../common.mk
|
|||
# options for code generation
|
||||
#---------------------------------------------------------------------------------
|
||||
export DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE
|
||||
export SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror
|
||||
export SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror -fno-non-call-exceptions
|
||||
export CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)
|
||||
export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS)
|
||||
export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit
|
||||
export ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES)
|
||||
|
||||
export LDFLAGS = -specs=$(TOPDIR)/kernel_ldr.specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map)
|
||||
export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) -Wl,-z,relro,-z,now
|
||||
|
||||
export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \
|
||||
-Wl,--wrap,__cxa_throw \
|
||||
|
@ -25,7 +25,6 @@ export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \
|
|||
-Wl,--wrap,__cxa_call_terminate \
|
||||
-Wl,--wrap,__gxx_personality_v0 \
|
||||
-Wl,--wrap,_Unwind_Resume \
|
||||
-Wl,--wrap,_Unwind_Resume \
|
||||
-Wl,--wrap,_ZSt19__throw_logic_errorPKc \
|
||||
-Wl,--wrap,_ZSt20__throw_length_errorPKc \
|
||||
-Wl,--wrap,_ZNSt11logic_errorC2EPKc
|
||||
|
|
|
@ -6,10 +6,12 @@ include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../config/common.mk
|
|||
#---------------------------------------------------------------------------------
|
||||
# options for code generation
|
||||
#---------------------------------------------------------------------------------
|
||||
PRECOMPILED_HEADERS := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/include/mesosphere.hpp
|
||||
|
||||
DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE
|
||||
SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror
|
||||
SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror -fno-non-call-exceptions
|
||||
CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)
|
||||
CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -flto
|
||||
CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto
|
||||
ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS)
|
||||
|
||||
LIBS :=
|
||||
|
@ -27,7 +29,7 @@ LIBDIRS := $(ATMOSPHERE_LIBRARIES_DIR)/libvapours
|
|||
ifneq ($(BUILD),$(notdir $(CURDIR)))
|
||||
#---------------------------------------------------------------------------------
|
||||
|
||||
export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \
|
||||
export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) $(CURDIR)/include \
|
||||
$(foreach dir,$(DATA),$(CURDIR)/$(dir))
|
||||
|
||||
CFILES := $(foreach dir,$(SOURCES),$(filter-out $(notdir $(wildcard $(dir)/*.arch.*.c)) $(notdir $(wildcard $(dir)/*.board.*.c)) $(notdir $(wildcard $(dir)/*.os.*.c)), \
|
||||
|
@ -64,6 +66,7 @@ endif
|
|||
|
||||
export OFILES_BIN := $(addsuffix .o,$(BINFILES))
|
||||
export OFILES_SRC := $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o)
|
||||
export GCH_FILES := $(foreach hdr,$(PRECOMPILED_HEADERS:.hpp=.gch),$(notdir $(hdr)))
|
||||
export OFILES := $(OFILES_BIN) $(OFILES_SRC)
|
||||
export HFILES_BIN := $(addsuffix .h,$(subst .,_,$(BINFILES)))
|
||||
|
||||
|
@ -105,13 +108,15 @@ clean:
|
|||
#---------------------------------------------------------------------------------
|
||||
else
|
||||
|
||||
DEPENDS := $(OFILES:.o=.d)
|
||||
DEPENDS := $(OFILES:.o=.d) $(GCH_FILES:.gch=.d)
|
||||
|
||||
#---------------------------------------------------------------------------------
|
||||
# main targets
|
||||
#---------------------------------------------------------------------------------
|
||||
$(OUTPUT) : $(OFILES)
|
||||
|
||||
$(filter-out kern_svc_tables.o, $(OFILES)) : $(GCH_FILES)
|
||||
|
||||
$(OFILES_SRC) : $(HFILES_BIN)
|
||||
|
||||
#---------------------------------------------------------------------------------
|
||||
|
|
|
@ -19,20 +19,70 @@
|
|||
#include <vapours.hpp>
|
||||
|
||||
/* First, pull in core macros (panic, etc). */
|
||||
#include "mesosphere/kern_panic.hpp"
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_panic.hpp>
|
||||
|
||||
/* Primitive types. */
|
||||
#include "mesosphere/kern_k_typed_address.hpp"
|
||||
#include "mesosphere/kern_initial_process.hpp"
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
#include <mesosphere/kern_initial_process.hpp>
|
||||
#include <mesosphere/kern_k_exception_context.hpp>
|
||||
|
||||
/* Core pre-initialization includes. */
|
||||
#include "mesosphere/kern_select_cpu.hpp"
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_select_system_control.hpp>
|
||||
#include <mesosphere/kern_k_target_system.hpp>
|
||||
|
||||
/* Initialization headers. */
|
||||
#include "mesosphere/init/kern_init_elf.hpp"
|
||||
#include "mesosphere/init/kern_init_layout.hpp"
|
||||
#include "mesosphere/init/kern_init_page_table_select.hpp"
|
||||
#include <mesosphere/init/kern_init_elf.hpp>
|
||||
#include <mesosphere/init/kern_init_layout.hpp>
|
||||
#include <mesosphere/init/kern_init_slab_setup.hpp>
|
||||
#include <mesosphere/init/kern_init_page_table_select.hpp>
|
||||
#include <mesosphere/init/kern_init_arguments_select.hpp>
|
||||
#include <mesosphere/kern_k_memory_layout.hpp>
|
||||
|
||||
/* Core functionality. */
|
||||
#include "mesosphere/kern_select_interrupts.hpp"
|
||||
#include "mesosphere/kern_select_k_system_control.hpp"
|
||||
#include <mesosphere/kern_select_interrupt_manager.hpp>
|
||||
#include <mesosphere/kern_k_spin_lock.hpp>
|
||||
#include <mesosphere/kern_k_memory_manager.hpp>
|
||||
#include <mesosphere/kern_k_interrupt_task_manager.hpp>
|
||||
#include <mesosphere/kern_k_core_local_region.hpp>
|
||||
#include <mesosphere/kern_k_slab_heap.hpp>
|
||||
#include <mesosphere/kern_k_light_lock.hpp>
|
||||
#include <mesosphere/kern_k_dpc_manager.hpp>
|
||||
#include <mesosphere/kern_kernel.hpp>
|
||||
#include <mesosphere/kern_k_page_table_manager.hpp>
|
||||
#include <mesosphere/kern_select_page_table.hpp>
|
||||
|
||||
/* Miscellaneous objects. */
|
||||
#include <mesosphere/kern_k_shared_memory_info.hpp>
|
||||
#include <mesosphere/kern_k_event_info.hpp>
|
||||
|
||||
/* Auto Objects. */
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
#include <mesosphere/kern_k_readable_event.hpp>
|
||||
#include <mesosphere/kern_k_handle_table.hpp>
|
||||
#include <mesosphere/kern_k_event.hpp>
|
||||
#include <mesosphere/kern_k_interrupt_event.hpp>
|
||||
#include <mesosphere/kern_k_light_session.hpp>
|
||||
#include <mesosphere/kern_k_session.hpp>
|
||||
#include <mesosphere/kern_k_session_request.hpp>
|
||||
#include <mesosphere/kern_k_port.hpp>
|
||||
#include <mesosphere/kern_k_shared_memory.hpp>
|
||||
#include <mesosphere/kern_k_transfer_memory.hpp>
|
||||
#include <mesosphere/kern_k_code_memory.hpp>
|
||||
#include <mesosphere/kern_k_device_address_space.hpp>
|
||||
#include <mesosphere/kern_select_debug.hpp>
|
||||
#include <mesosphere/kern_k_process.hpp>
|
||||
#include <mesosphere/kern_k_resource_limit.hpp>
|
||||
#include <mesosphere/kern_k_synchronization.hpp>
|
||||
|
||||
/* More Miscellaneous objects. */
|
||||
#include <mesosphere/kern_k_object_name.hpp>
|
||||
#include <mesosphere/kern_k_scoped_resource_reservation.hpp>
|
||||
|
||||
/* Supervisor Calls. */
|
||||
#include <mesosphere/kern_svc.hpp>
|
||||
|
||||
/* Main functionality. */
|
||||
#include <mesosphere/kern_main.hpp>
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
namespace ams::kern::init {
|
||||
|
||||
struct KInitArguments {
|
||||
u64 ttbr0;
|
||||
u64 ttbr1;
|
||||
u64 tcr;
|
||||
u64 mair;
|
||||
u64 cpuactlr;
|
||||
u64 cpuectlr;
|
||||
u64 sctlr;
|
||||
u64 sp;
|
||||
u64 entrypoint;
|
||||
u64 argument;
|
||||
u64 setup_function;
|
||||
};
|
||||
|
||||
}
|
|
@ -15,173 +15,12 @@
|
|||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include <mesosphere/kern_panic.hpp>
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
#include "../kern_cpu.hpp"
|
||||
|
||||
namespace ams::kern::init {
|
||||
|
||||
constexpr size_t PageSize = 0x1000;
|
||||
constexpr size_t L1BlockSize = 0x40000000;
|
||||
constexpr size_t L2BlockSize = 0x200000;
|
||||
constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
|
||||
constexpr size_t L3BlockSize = 0x1000;
|
||||
constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;
|
||||
|
||||
class PageTableEntry {
|
||||
public:
|
||||
enum Permission : u64 {
|
||||
Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
|
||||
Permission_KernelRX = ((0ul << 53) | (1ul << 54) | (2ul << 6)),
|
||||
Permission_KernelR = ((1ul << 53) | (1ul << 54) | (2ul << 6)),
|
||||
Permission_KernelRW = ((1ul << 53) | (1ul << 54) | (0ul << 6)),
|
||||
|
||||
Permission_UserRX = ((1ul << 53) | (0ul << 54) | (3ul << 6)),
|
||||
Permission_UserR = ((1ul << 53) | (1ul << 54) | (3ul << 6)),
|
||||
Permission_UserRW = ((1ul << 53) | (1ul << 54) | (1ul << 6)),
|
||||
};
|
||||
|
||||
enum Shareable : u64 {
|
||||
Shareable_NonShareable = (0 << 8),
|
||||
Shareable_OuterShareable = (2 << 8),
|
||||
Shareable_InnerShareable = (3 << 8),
|
||||
};
|
||||
|
||||
/* Official attributes are: */
|
||||
/* 0x00, 0x04, 0xFF, 0x44. 4-7 are unused. */
|
||||
enum PageAttribute : u64 {
|
||||
PageAttribute_Device_nGnRnE = (0 << 2),
|
||||
PageAttribute_Device_nGnRE = (1 << 2),
|
||||
PageAttribute_NormalMemory = (2 << 2),
|
||||
PageAttribute_NormalMemoryNotCacheable = (3 << 2),
|
||||
};
|
||||
|
||||
enum AccessFlag : u64 {
|
||||
AccessFlag_NotAccessed = (0 << 10),
|
||||
AccessFlag_Accessed = (1 << 10),
|
||||
};
|
||||
protected:
|
||||
u64 attributes;
|
||||
public:
|
||||
/* Take in a raw attribute. */
|
||||
constexpr ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
|
||||
|
||||
/* Extend a previous attribute. */
|
||||
constexpr ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
|
||||
|
||||
/* Construct a new attribute. */
|
||||
constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share)
|
||||
: attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share))
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
protected:
|
||||
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
|
||||
return (this->attributes >> offset) & ((1ul << count) - 1);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
|
||||
return this->attributes & (((1ul << count) - 1) << offset);
|
||||
}
|
||||
public:
|
||||
constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; }
|
||||
constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
|
||||
constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
|
||||
constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->GetBits(10, 1)); }
|
||||
constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->GetBits(8, 2)); }
|
||||
constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); }
|
||||
constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
|
||||
constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x1; }
|
||||
constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; }
|
||||
|
||||
/* Should not be called except by derived classes. */
|
||||
constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
|
||||
return this->attributes;
|
||||
}
|
||||
};
|
||||
|
||||
static_assert(sizeof(PageTableEntry) == sizeof(u64));
|
||||
|
||||
constexpr PageTableEntry InvalidPageTableEntry = PageTableEntry(0);
|
||||
|
||||
constexpr size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry);
|
||||
|
||||
class L1PageTableEntry : public PageTableEntry {
|
||||
public:
|
||||
constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
|
||||
: PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
|
||||
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
|
||||
return this->SelectBits(30, 18);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
|
||||
return this->SelectBits(12, 36);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
|
||||
/* Check whether this has the same permission/etc as the desired attributes. */
|
||||
return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
|
||||
}
|
||||
};
|
||||
|
||||
class L2PageTableEntry : public PageTableEntry {
|
||||
public:
|
||||
constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
|
||||
: PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
|
||||
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
|
||||
return this->SelectBits(21, 27);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
|
||||
return this->SelectBits(12, 36);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
|
||||
/* Check whether this has the same permission/etc as the desired attributes. */
|
||||
return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
|
||||
}
|
||||
};
|
||||
|
||||
class L3PageTableEntry : public PageTableEntry {
|
||||
public:
|
||||
constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
|
||||
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; }
|
||||
|
||||
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
|
||||
return this->SelectBits(12, 36);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
|
||||
/* Check whether this has the same permission/etc as the desired attributes. */
|
||||
return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
|
||||
}
|
||||
};
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/arch/arm64/kern_k_page_table_entry.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64::init {
|
||||
|
||||
class KInitialPageTable {
|
||||
public:
|
||||
|
@ -190,10 +29,14 @@ namespace ams::kern::init {
|
|||
virtual KPhysicalAddress Allocate() { return Null<KPhysicalAddress>; }
|
||||
virtual void Free(KPhysicalAddress phys_addr) { /* Nothing to do here. */ (void)(phys_addr); }
|
||||
};
|
||||
|
||||
struct NoClear{};
|
||||
private:
|
||||
KPhysicalAddress l1_table;
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : l1_table(l1) {
|
||||
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }
|
||||
|
||||
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
|
||||
ClearNewPageTable(this->l1_table);
|
||||
}
|
||||
|
||||
|
@ -224,9 +67,9 @@ namespace ams::kern::init {
|
|||
public:
|
||||
void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) {
|
||||
/* Ensure that addresses and sizes are page aligned. */
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
|
||||
|
||||
/* Iteratively map pages until the requested region is mapped. */
|
||||
while (size > 0) {
|
||||
|
@ -309,10 +152,37 @@ namespace ams::kern::init {
|
|||
}
|
||||
}
|
||||
|
||||
KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
|
||||
/* Get the L1 entry. */
|
||||
const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
|
||||
if (l1_entry->IsBlock()) {
|
||||
return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
|
||||
}
|
||||
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
|
||||
|
||||
/* Get the L2 entry. */
|
||||
const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
|
||||
|
||||
if (l2_entry->IsBlock()) {
|
||||
return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1));
|
||||
}
|
||||
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
|
||||
|
||||
/* Get the L3 entry. */
|
||||
const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
|
||||
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
|
||||
|
||||
return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
|
||||
}
|
||||
|
||||
bool IsFree(KVirtualAddress virt_addr, size_t size) {
|
||||
/* Ensure that addresses and sizes are page aligned. */
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
|
||||
|
||||
const KVirtualAddress end_virt_addr = virt_addr + size;
|
||||
while (virt_addr < end_virt_addr) {
|
||||
|
@ -360,8 +230,8 @@ namespace ams::kern::init {
|
|||
cpu::DataSynchronizationBarrierInnerShareable();
|
||||
|
||||
/* Ensure that addresses and sizes are page aligned. */
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
|
||||
|
||||
/* Iteratively reprotect pages until the requested region is reprotected. */
|
||||
while (size > 0) {
|
||||
|
@ -371,9 +241,9 @@ namespace ams::kern::init {
|
|||
if (l1_entry->IsBlock()) {
|
||||
/* Ensure that we are allowed to have an L1 block here. */
|
||||
const KPhysicalAddress block = l1_entry->GetBlock();
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));
|
||||
|
||||
/* Invalidate the existing L1 block. */
|
||||
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
|
||||
|
@ -389,7 +259,7 @@ namespace ams::kern::init {
|
|||
}
|
||||
|
||||
/* Not a block, so we must be a table. */
|
||||
MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable());
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
|
||||
|
||||
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
|
||||
if (l2_entry->IsBlock()) {
|
||||
|
@ -397,14 +267,14 @@ namespace ams::kern::init {
|
|||
|
||||
if (l2_entry->IsContiguous()) {
|
||||
/* Ensure that we are allowed to have a contiguous L2 block here. */
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
|
||||
|
||||
/* Invalidate the existing contiguous L2 block. */
|
||||
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
|
||||
/* Ensure that the entry is valid. */
|
||||
MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
|
||||
static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
|
||||
}
|
||||
cpu::DataSynchronizationBarrierInnerShareable();
|
||||
|
@ -419,10 +289,10 @@ namespace ams::kern::init {
|
|||
size -= L2ContiguousBlockSize;
|
||||
} else {
|
||||
/* Ensure that we are allowed to have an L2 block here. */
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));
|
||||
|
||||
/* Invalidate the existing L2 block. */
|
||||
*static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
|
||||
|
@ -440,23 +310,23 @@ namespace ams::kern::init {
|
|||
}
|
||||
|
||||
/* Not a block, so we must be a table. */
|
||||
MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable());
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
|
||||
|
||||
/* We must have a mapped l3 entry to reprotect. */
|
||||
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
|
||||
MESOSPHERE_ABORT_UNLESS(l3_entry->IsBlock());
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
|
||||
const KPhysicalAddress block = l3_entry->GetBlock();
|
||||
|
||||
if (l3_entry->IsContiguous()) {
|
||||
/* Ensure that we are allowed to have a contiguous L3 block here. */
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
|
||||
|
||||
/* Invalidate the existing contiguous L3 block. */
|
||||
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
|
||||
/* Ensure that the entry is valid. */
|
||||
MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
|
||||
static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
|
||||
}
|
||||
cpu::DataSynchronizationBarrierInnerShareable();
|
||||
|
@ -471,10 +341,10 @@ namespace ams::kern::init {
|
|||
size -= L3ContiguousBlockSize;
|
||||
} else {
|
||||
/* Ensure that we are allowed to have an L3 block here. */
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
|
||||
MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));
|
||||
|
||||
/* Invalidate the existing L3 block. */
|
||||
*static_cast<PageTableEntry *>(l3_entry) = InvalidPageTableEntry;
|
||||
|
@ -495,4 +365,35 @@ namespace ams::kern::init {
|
|||
|
||||
};
|
||||
|
||||
class KInitialPageAllocator : public KInitialPageTable::IPageAllocator {
|
||||
private:
|
||||
uintptr_t next_address;
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KInitialPageAllocator() : next_address(Null<uintptr_t>) { /* ... */ }
|
||||
|
||||
ALWAYS_INLINE void Initialize(uintptr_t address) {
|
||||
this->next_address = address;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE uintptr_t GetFinalNextAddress() {
|
||||
const uintptr_t final_address = this->next_address;
|
||||
this->next_address = Null<uintptr_t>;
|
||||
return final_address;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE uintptr_t GetFinalState() {
|
||||
return this->GetFinalNextAddress();
|
||||
}
|
||||
public:
|
||||
virtual KPhysicalAddress Allocate() override {
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
|
||||
const uintptr_t allocated = this->next_address;
|
||||
this->next_address += PageSize;
|
||||
std::memset(reinterpret_cast<void *>(allocated), 0, PageSize);
|
||||
return allocated;
|
||||
}
|
||||
|
||||
/* No need to override free. The default does nothing, and so would we. */
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -15,9 +15,27 @@
|
|||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include "kern_cpu_system_registers.hpp"
|
||||
#include <mesosphere/arch/arm64/kern_cpu_system_registers.hpp>
|
||||
#include <mesosphere/kern_select_userspace_memory_access.hpp>
|
||||
|
||||
namespace ams::kern::arm64::cpu {
|
||||
namespace ams::kern::arch::arm64::cpu {
|
||||
|
||||
#if defined(ATMOSPHERE_CPU_ARM_CORTEX_A57) || defined(ATMOSPHERE_CPU_ARM_CORTEX_A53)
|
||||
constexpr inline size_t InstructionCacheLineSize = 0x40;
|
||||
constexpr inline size_t DataCacheLineSize = 0x40;
|
||||
constexpr inline size_t NumPerformanceCounters = 6;
|
||||
#else
|
||||
#error "Unknown CPU for cache line sizes"
|
||||
#endif
|
||||
|
||||
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
|
||||
constexpr inline size_t NumCores = 4;
|
||||
#else
|
||||
#error "Unknown Board for cpu::NumCores"
|
||||
#endif
|
||||
|
||||
/* Initialization. */
|
||||
NOINLINE void InitializeInterruptThreads(s32 core_id);
|
||||
|
||||
/* Helpers for managing memory state. */
|
||||
ALWAYS_INLINE void DataSynchronizationBarrier() {
|
||||
|
@ -46,13 +64,154 @@ namespace ams::kern::arm64::cpu {
|
|||
EnsureInstructionConsistency();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void SwitchProcess(u64 ttbr, u32 proc_id) {
|
||||
SetTtbr0El1(ttbr);
|
||||
ContextIdRegisterAccessor(0).SetProcId(proc_id).Store();
|
||||
InstructionMemoryBarrier();
|
||||
}
|
||||
|
||||
/* Performance counter helpers. */
|
||||
ALWAYS_INLINE u64 GetCycleCounter() {
|
||||
return cpu::GetPmcCntrEl0();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE u32 GetPerformanceCounter(s32 n) {
|
||||
u64 counter = 0;
|
||||
if (n < static_cast<s32>(NumPerformanceCounters)) {
|
||||
switch (n) {
|
||||
case 0:
|
||||
counter = cpu::GetPmevCntr0El0();
|
||||
break;
|
||||
case 1:
|
||||
counter = cpu::GetPmevCntr1El0();
|
||||
break;
|
||||
case 2:
|
||||
counter = cpu::GetPmevCntr2El0();
|
||||
break;
|
||||
case 3:
|
||||
counter = cpu::GetPmevCntr3El0();
|
||||
break;
|
||||
case 4:
|
||||
counter = cpu::GetPmevCntr4El0();
|
||||
break;
|
||||
case 5:
|
||||
counter = cpu::GetPmevCntr5El0();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return static_cast<u32>(counter);
|
||||
}
|
||||
|
||||
/* Helper for address access. */
|
||||
ALWAYS_INLINE bool GetPhysicalAddressWritable(KPhysicalAddress *out, KVirtualAddress addr, bool privileged = false) {
|
||||
const uintptr_t va = GetInteger(addr);
|
||||
|
||||
if (privileged) {
|
||||
__asm__ __volatile__("at s1e1w, %[va]" :: [va]"r"(va) : "memory");
|
||||
} else {
|
||||
__asm__ __volatile__("at s1e0w, %[va]" :: [va]"r"(va) : "memory");
|
||||
}
|
||||
InstructionMemoryBarrier();
|
||||
|
||||
u64 par = GetParEl1();
|
||||
|
||||
if (par & 0x1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (out) {
|
||||
*out = KPhysicalAddress((par & 0xFFFFFFFFF000ull) | (va & 0xFFFull));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool GetPhysicalAddressReadable(KPhysicalAddress *out, KVirtualAddress addr, bool privileged = false) {
|
||||
const uintptr_t va = GetInteger(addr);
|
||||
|
||||
if (privileged) {
|
||||
__asm__ __volatile__("at s1e1r, %[va]" :: [va]"r"(va) : "memory");
|
||||
} else {
|
||||
__asm__ __volatile__("at s1e0r, %[va]" :: [va]"r"(va) : "memory");
|
||||
}
|
||||
InstructionMemoryBarrier();
|
||||
|
||||
u64 par = GetParEl1();
|
||||
|
||||
if (par & 0x1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (out) {
|
||||
*out = KPhysicalAddress((par & 0xFFFFFFFFF000ull) | (va & 0xFFFull));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Synchronization helpers. */
|
||||
NOINLINE void SynchronizeAllCores();
|
||||
|
||||
/* Cache management helpers. */
|
||||
void FlushEntireDataCacheShared();
|
||||
void FlushEntireDataCacheLocal();
|
||||
void ClearPageToZeroImpl(void *);
|
||||
void FlushEntireDataCacheSharedForInit();
|
||||
void FlushEntireDataCacheLocalForInit();
|
||||
|
||||
void FlushEntireDataCache();
|
||||
|
||||
Result InvalidateDataCache(void *addr, size_t size);
|
||||
Result StoreDataCache(const void *addr, size_t size);
|
||||
Result FlushDataCache(const void *addr, size_t size);
|
||||
Result InvalidateInstructionCache(void *addr, size_t size);
|
||||
|
||||
ALWAYS_INLINE void ClearPageToZero(void *page) {
|
||||
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize));
|
||||
MESOSPHERE_ASSERT(page != nullptr);
|
||||
ClearPageToZeroImpl(page);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) {
|
||||
const u64 value = (static_cast<u64>(asid) << 48);
|
||||
__asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(static_cast<u64>(value) << 48) : "memory");
|
||||
EnsureInstructionConsistency();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void InvalidateTlbByAsidAndVa(u32 asid, KProcessAddress virt_addr) {
|
||||
const u64 value = (static_cast<u64>(asid) << 48) | ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
|
||||
__asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory");
|
||||
EnsureInstructionConsistency();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void InvalidateEntireTlb() {
|
||||
__asm__ __volatile__("tlbi vmalle1is" ::: "memory");
|
||||
EnsureInstructionConsistency();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void InvalidateEntireTlbDataOnly() {
|
||||
__asm__ __volatile__("tlbi vmalle1is" ::: "memory");
|
||||
DataSynchronizationBarrier();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void InvalidateTlbByVaDataOnly(KProcessAddress virt_addr) {
|
||||
const u64 value = ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
|
||||
__asm__ __volatile__("tlbi vaae1is, %[value]" :: [value]"r"(value) : "memory");
|
||||
DataSynchronizationBarrier();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE uintptr_t GetCoreLocalRegionAddress() {
|
||||
register uintptr_t x18 asm("x18");
|
||||
__asm__ __volatile__("" : [x18]"=r"(x18));
|
||||
return x18;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void SetCoreLocalRegionAddress(uintptr_t value) {
|
||||
register uintptr_t x18 asm("x18") = value;
|
||||
__asm__ __volatile__("":: [x18]"r"(x18));
|
||||
SetTpidrEl1(value);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void SwitchThreadLocalRegion(uintptr_t tlr) {
|
||||
cpu::SetTpidrRoEl0(tlr);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
|
||||
namespace ams::kern::arm64::cpu {
|
||||
namespace ams::kern::arch::arm64::cpu {
|
||||
|
||||
#define MESOSPHERE_CPU_GET_SYSREG(name) \
|
||||
({ \
|
||||
|
@ -37,8 +37,14 @@ namespace ams::kern::arm64::cpu {
|
|||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrEl1, tpidr_el1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(VbarEl1, vbar_el1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(FarEl1, far_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(ParEl1, par_el1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1)
|
||||
|
||||
|
@ -46,21 +52,215 @@ namespace ams::kern::arm64::cpu {
|
|||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpuEctlrEl1, s3_1_c15_c2_1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CcsidrEl1, ccsidr_el1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(OslarEl1, oslar_el1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrRoEl0, tpidrro_el0)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(EsrEl1, esr_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr0El1, afsr0_el1)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr1El1, afsr1_el1)
|
||||
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmUserEnrEl0, pmuserenr_el0)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcCntrEl0, pmccntr_el0)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr0El0, pmevcntr0_el0)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr1El0, pmevcntr1_el0)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr2El0, pmevcntr2_el0)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr3El0, pmevcntr3_el0)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr4El0, pmevcntr4_el0)
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr5El0, pmevcntr5_el0)
|
||||
|
||||
#define FOR_I_IN_0_TO_15(HANDLER, ...) \
|
||||
HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \
|
||||
HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \
|
||||
HANDLER(8, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(11, ## __VA_ARGS__) \
|
||||
HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__) \
|
||||
|
||||
#define MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS(ID, ...) \
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWcr##ID##El1, dbgwcr##ID##_el1) \
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWvr##ID##El1, dbgwvr##ID##_el1) \
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBcr##ID##El1, dbgbcr##ID##_el1) \
|
||||
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBvr##ID##El1, dbgbvr##ID##_el1)
|
||||
|
||||
FOR_I_IN_0_TO_15(MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS)
|
||||
|
||||
#undef MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS
|
||||
|
||||
/* Base class for register accessors. */
|
||||
class GenericRegisterAccessor {
|
||||
class GenericRegisterAccessorBase {
|
||||
NON_COPYABLE(GenericRegisterAccessorBase);
|
||||
NON_MOVEABLE(GenericRegisterAccessorBase);
|
||||
private:
|
||||
u64 value;
|
||||
public:
|
||||
ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... */ }
|
||||
constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
|
||||
protected:
|
||||
constexpr ALWAYS_INLINE u64 GetValue() const {
|
||||
return this->value;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
|
||||
return (this->value >> offset) & ((1ul << count) - 1);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
|
||||
const u64 mask = ((1ul << count) - 1) << offset;
|
||||
this->value &= ~mask;
|
||||
this->value |= (value & (mask >> offset)) << offset;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
|
||||
const u64 mask = ((1ul << count) - 1) << offset;
|
||||
this->value &= ~mask;
|
||||
this->value |= (value & mask);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
|
||||
const u64 mask = 1ul << offset;
|
||||
if (enabled) {
|
||||
this->value |= mask;
|
||||
} else {
|
||||
this->value &= ~mask;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/* Special code for main id register. */
|
||||
class MainIdRegisterAccessor : public GenericRegisterAccessor {
|
||||
template<typename Derived>
|
||||
class GenericRegisterAccessor : public GenericRegisterAccessorBase {
|
||||
public:
|
||||
constexpr ALWAYS_INLINE GenericRegisterAccessor(u64 v) : GenericRegisterAccessorBase(v) { /* ... */ }
|
||||
protected:
|
||||
ALWAYS_INLINE void Store() const {
|
||||
static_cast<const Derived *>(this)->Store();
|
||||
}
|
||||
};
|
||||
|
||||
#define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public GenericRegisterAccessor<name##RegisterAccessor>
|
||||
|
||||
#define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(accessor, reg_name) \
|
||||
ALWAYS_INLINE accessor##RegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(reg_name)) { /* ... */ } \
|
||||
constexpr ALWAYS_INLINE accessor##RegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ } \
|
||||
\
|
||||
ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(reg_name, v); }
|
||||
|
||||
/* Accessors. */
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MemoryAccessIndirection) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MemoryAccessIndirection, mair_el1)
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(TranslationControl) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetT1Size() const {
|
||||
const size_t shift_value = this->GetBits(16, 6);
|
||||
return size_t(1) << (size_t(64) - shift_value);
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ArchitecturalFeatureAccessControl) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ArchitecturalFeatureAccessControl, cpacr_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetFpEnabled(bool en) {
|
||||
if (en) {
|
||||
this->SetBits(20, 2, 0x3);
|
||||
} else {
|
||||
this->SetBits(20, 2, 0x0);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsFpEnabled() {
|
||||
return this->GetBits(20, 2) != 0;
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(DebugFeature) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(DebugFeature, id_aa64dfr0_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetNumWatchpoints() const {
|
||||
return this->GetBits(20, 4);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetNumBreakpoints() const {
|
||||
return this->GetBits(12, 4);
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MonitorDebugSystemControl) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MonitorDebugSystemControl, mdscr_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE bool GetMde() const {
|
||||
return this->GetBits(15, 1) != 0;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetTdcc() const {
|
||||
return this->GetBits(12, 1) != 0;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetMde(bool set) {
|
||||
this->SetBit(15, set);
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetTdcc(bool set) {
|
||||
this->SetBit(12, set);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetAff0() const {
|
||||
return this->GetBits(0, 8);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetAff1() const {
|
||||
return this->GetBits(8, 8);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetAff2() const {
|
||||
return this->GetBits(16, 8);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetAff3() const {
|
||||
return this->GetBits(32, 8);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetCpuOnArgument() const {
|
||||
constexpr u64 Mask = 0x000000FF00FFFF00ul;
|
||||
return this->GetValue() & Mask;
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ThreadId) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1)
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(OsLockAccess) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(OsLockAccess, oslar_el1)
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ContextId) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ContextId, contextidr_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetProcId(u32 proc_id) {
|
||||
this->SetBits(0, BITSIZEOF(proc_id), proc_id);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) {
|
||||
public:
|
||||
enum class Implementer {
|
||||
ArmLimited = 0x41,
|
||||
|
@ -70,7 +270,7 @@ namespace ams::kern::arm64::cpu {
|
|||
CortexA57 = 0xD07,
|
||||
};
|
||||
public:
|
||||
ALWAYS_INLINE MainIdRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(midr_el1)) { /* ... */ }
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MainId, midr_el1)
|
||||
public:
|
||||
constexpr ALWAYS_INLINE Implementer GetImplementer() const {
|
||||
return static_cast<Implementer>(this->GetBits(24, 8));
|
||||
|
@ -93,10 +293,69 @@ namespace ams::kern::arm64::cpu {
|
|||
}
|
||||
};
|
||||
|
||||
/* Accessors for cache registers. */
|
||||
class CacheLineIdAccessor : public GenericRegisterAccessor {
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(SystemControl) {
|
||||
public:
|
||||
ALWAYS_INLINE CacheLineIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(clidr_el1)) { /* ... */ }
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(SystemControl, sctlr_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetWxn(bool en) {
|
||||
this->SetBit(19, en);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
/* Accessors for timer registers. */
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerKernelControl) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerKernelControl, cntkctl_el1)
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetEl0PctEn(bool en) {
|
||||
this->SetBit(0, en);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerControl) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerControl, cntp_ctl_el0)
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetEnable(bool en) {
|
||||
this->SetBit(0, en);
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetIMask(bool en) {
|
||||
this->SetBit(1, en);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerCompareValue) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerCompareValue, cntp_cval_el0)
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetCompareValue() {
|
||||
return this->GetValue();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetCompareValue(u64 value) {
|
||||
this->SetBits(0, BITSIZEOF(value), value);
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalCountValue) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalCountValue, cntpct_el0)
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetCount() {
|
||||
return this->GetValue();
|
||||
}
|
||||
};
|
||||
|
||||
/* Accessors for cache registers. */
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) {
|
||||
public:
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheLineId, clidr_el1)
|
||||
public:
|
||||
constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const {
|
||||
return static_cast<int>(this->GetBits(24, 3));
|
||||
|
@ -109,9 +368,9 @@ namespace ams::kern::arm64::cpu {
|
|||
/* TODO: Other bitfield accessors? */
|
||||
};
|
||||
|
||||
class CacheSizeIdAccessor : public GenericRegisterAccessor {
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheSizeId) {
|
||||
public:
|
||||
ALWAYS_INLINE CacheSizeIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(ccsidr_el1)) { /* ... */ }
|
||||
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheSizeId, ccsidr_el1)
|
||||
public:
|
||||
constexpr ALWAYS_INLINE int GetNumberOfSets() const {
|
||||
return static_cast<int>(this->GetBits(13, 15));
|
||||
|
@ -128,6 +387,9 @@ namespace ams::kern::arm64::cpu {
|
|||
/* TODO: Other bitfield accessors? */
|
||||
};
|
||||
|
||||
#undef FOR_I_IN_0_TO_15
|
||||
#undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS
|
||||
#undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS
|
||||
#undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS
|
||||
#undef MESOSPHERE_CPU_GET_SYSREG
|
||||
#undef MESOSPHERE_CPU_SET_SYSREG
|
||||
|
|
|
@ -14,25 +14,23 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_debug_base.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KSystemControl {
|
||||
public:
|
||||
class Init {
|
||||
public:
|
||||
/* Initialization. */
|
||||
static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
|
||||
static bool ShouldIncreaseThreadResourceLimit();
|
||||
class KThread;
|
||||
class KProcess;
|
||||
|
||||
/* Randomness. */
|
||||
static void GenerateRandomBytes(void *dst, size_t size);
|
||||
static u64 GenerateRandomRange(u64 min, u64 max);
|
||||
};
|
||||
}
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KDebugBase> {
|
||||
MESOSPHERE_AUTOOBJECT_TRAITS(KDebug, KSynchronizationObject);
|
||||
public:
|
||||
/* Panic. */
|
||||
static NORETURN void StopSystem();
|
||||
/* TODO: This is a placeholder definition. */
|
||||
};
|
||||
|
||||
}
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
struct KExceptionContext {
|
||||
u64 x[(30 - 0) + 1];
|
||||
u64 sp;
|
||||
u64 pc;
|
||||
u64 psr;
|
||||
u64 tpidr;
|
||||
u64 reserved;
|
||||
};
|
||||
static_assert(sizeof(KExceptionContext) == 0x120);
|
||||
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_hardware_timer_base.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
namespace impl {
|
||||
|
||||
class KHardwareTimerInterruptTask;
|
||||
|
||||
}
|
||||
|
||||
class KHardwareTimer : public KHardwareTimerBase {
|
||||
public:
|
||||
constexpr KHardwareTimer() : KHardwareTimerBase() { /* ... */ }
|
||||
public:
|
||||
/* Public API. */
|
||||
NOINLINE void Initialize(s32 core_id);
|
||||
NOINLINE void Finalize();
|
||||
|
||||
static s64 GetTick() {
|
||||
return GetCount();
|
||||
}
|
||||
|
||||
void RegisterAbsoluteTask(KTimerTask *task, s64 task_time) {
|
||||
KScopedDisableDispatch dd;
|
||||
KScopedSpinLock lk(this->GetLock());
|
||||
|
||||
if (this->RegisterAbsoluteTaskImpl(task, task_time)) {
|
||||
SetCompareValue(task_time);
|
||||
EnableInterrupt();
|
||||
}
|
||||
}
|
||||
private:
|
||||
friend class impl::KHardwareTimerInterruptTask;
|
||||
NOINLINE void DoInterruptTask();
|
||||
private:
|
||||
/* Hardware register accessors. */
|
||||
static ALWAYS_INLINE void InitializeGlobalTimer() {
|
||||
/* Set kernel control. */
|
||||
cpu::CounterTimerKernelControlRegisterAccessor(0).SetEl0PctEn(true).Store();
|
||||
|
||||
/* Disable the physical timer. */
|
||||
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store();
|
||||
|
||||
/* Set the compare value to the maximum. */
|
||||
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<u64>::max()).Store();
|
||||
|
||||
/* Enable the physical timer, with interrupt masked. */
|
||||
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void EnableInterrupt() {
|
||||
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(false).Store();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void DisableInterrupt() {
|
||||
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void StopTimer() {
|
||||
/* Set the compare value to the maximum. */
|
||||
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<u64>::max()).Store();
|
||||
|
||||
/* Disable the physical timer. */
|
||||
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE s64 GetCount() {
|
||||
return cpu::CounterTimerPhysicalCountValueRegisterAccessor().GetCount();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE void SetCompareValue(s64 value) {
|
||||
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(static_cast<u64>(value)).Store();
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,275 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
struct GicDistributor {
|
||||
u32 ctlr;
|
||||
u32 typer;
|
||||
u32 iidr;
|
||||
u32 reserved_0x0c;
|
||||
u32 statusr;
|
||||
u32 reserved_0x14[3];
|
||||
u32 impldef_0x20[8];
|
||||
u32 setspi_nsr;
|
||||
u32 reserved_0x44;
|
||||
u32 clrspi_nsr;
|
||||
u32 reserved_0x4c;
|
||||
u32 setspi_sr;
|
||||
u32 reserved_0x54;
|
||||
u32 clrspi_sr;
|
||||
u32 reserved_0x5c[9];
|
||||
u32 igroupr[32];
|
||||
u32 isenabler[32];
|
||||
u32 icenabler[32];
|
||||
u32 ispendr[32];
|
||||
u32 icpendr[32];
|
||||
u32 isactiver[32];
|
||||
u32 icactiver[32];
|
||||
union {
|
||||
u8 bytes[1020];
|
||||
u32 words[255];
|
||||
} ipriorityr;
|
||||
u32 _0x7fc;
|
||||
union {
|
||||
u8 bytes[1020];
|
||||
u32 words[255];
|
||||
} itargetsr;
|
||||
u32 _0xbfc;
|
||||
u32 icfgr[64];
|
||||
u32 igrpmodr[32];
|
||||
u32 _0xd80[32];
|
||||
u32 nsacr[64];
|
||||
u32 sgir;
|
||||
u32 _0xf04[3];
|
||||
u32 cpendsgir[4];
|
||||
u32 spendsgir[4];
|
||||
u32 reserved_0xf30[52];
|
||||
|
||||
static constexpr size_t SgirCpuTargetListShift = 16;
|
||||
|
||||
enum SgirTargetListFilter : u32 {
|
||||
SgirTargetListFilter_CpuTargetList = (0 << 24),
|
||||
SgirTargetListFilter_Others = (1 << 24),
|
||||
SgirTargetListFilter_Self = (2 << 24),
|
||||
SgirTargetListFilter_Reserved = (3 << 24),
|
||||
};
|
||||
};
|
||||
static_assert(std::is_pod<GicDistributor>::value);
|
||||
static_assert(sizeof(GicDistributor) == 0x1000);
|
||||
|
||||
struct GicCpuInterface {
|
||||
u32 ctlr;
|
||||
u32 pmr;
|
||||
u32 bpr;
|
||||
u32 iar;
|
||||
u32 eoir;
|
||||
u32 rpr;
|
||||
u32 hppir;
|
||||
u32 abpr;
|
||||
u32 aiar;
|
||||
u32 aeoir;
|
||||
u32 ahppir;
|
||||
u32 statusr;
|
||||
u32 reserved_30[4];
|
||||
u32 impldef_40[36];
|
||||
u32 apr[4];
|
||||
u32 nsapr[4];
|
||||
u32 reserved_f0[3];
|
||||
u32 iidr;
|
||||
u32 reserved_100[960];
|
||||
u32 dir;
|
||||
u32 _0x1004[1023];
|
||||
};
|
||||
static_assert(std::is_pod<GicCpuInterface>::value);
|
||||
static_assert(sizeof(GicCpuInterface) == 0x2000);
|
||||
|
||||
struct KInterruptController {
|
||||
NON_COPYABLE(KInterruptController);
|
||||
NON_MOVEABLE(KInterruptController);
|
||||
public:
|
||||
static constexpr s32 NumSoftwareInterrupts = 16;
|
||||
static constexpr s32 NumLocalInterrupts = NumSoftwareInterrupts + 16;
|
||||
static constexpr s32 NumGlobalInterrupts = 988;
|
||||
static constexpr s32 NumInterrupts = NumLocalInterrupts + NumGlobalInterrupts;
|
||||
static constexpr s32 NumPriorityLevels = 4;
|
||||
public:
|
||||
struct LocalState {
|
||||
u32 local_isenabler[NumLocalInterrupts / 32];
|
||||
u32 local_ipriorityr[NumLocalInterrupts / 4];
|
||||
u32 local_targetsr[NumLocalInterrupts / 4];
|
||||
u32 local_icfgr[NumLocalInterrupts / 16];
|
||||
};
|
||||
|
||||
struct GlobalState {
|
||||
u32 global_isenabler[NumGlobalInterrupts / 32];
|
||||
u32 global_ipriorityr[NumGlobalInterrupts / 4];
|
||||
u32 global_targetsr[NumGlobalInterrupts / 4];
|
||||
u32 global_icfgr[NumGlobalInterrupts / 16];
|
||||
};
|
||||
|
||||
enum PriorityLevel : u8 {
|
||||
PriorityLevel_High = 0,
|
||||
PriorityLevel_Low = NumPriorityLevels - 1,
|
||||
|
||||
PriorityLevel_Timer = 1,
|
||||
PriorityLevel_Scheduler = 2,
|
||||
};
|
||||
private:
|
||||
static inline u32 s_mask[cpu::NumCores];
|
||||
private:
|
||||
volatile GicDistributor *gicd;
|
||||
volatile GicCpuInterface *gicc;
|
||||
public:
|
||||
constexpr KInterruptController() : gicd(nullptr), gicc(nullptr) { /* ... */ }
|
||||
|
||||
void Initialize(s32 core_id);
|
||||
void Finalize(s32 core_id);
|
||||
public:
|
||||
u32 GetIrq() const {
|
||||
return this->gicc->iar;
|
||||
}
|
||||
|
||||
static constexpr s32 ConvertRawIrq(u32 irq) {
|
||||
return (irq == 0x3FF) ? -1 : (irq & 0x3FF);
|
||||
}
|
||||
|
||||
void Enable(s32 irq) const {
|
||||
this->gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
|
||||
}
|
||||
|
||||
void Disable(s32 irq) const {
|
||||
this->gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
|
||||
}
|
||||
|
||||
void Clear(s32 irq) const {
|
||||
this->gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
|
||||
}
|
||||
|
||||
void SetTarget(s32 irq, s32 core_id) const {
|
||||
this->gicd->itargetsr.bytes[irq] |= GetGicMask(core_id);
|
||||
}
|
||||
|
||||
void ClearTarget(s32 irq, s32 core_id) const {
|
||||
this->gicd->itargetsr.bytes[irq] &= ~GetGicMask(core_id);
|
||||
}
|
||||
|
||||
void SetPriorityLevel(s32 irq, s32 level) const {
|
||||
MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
|
||||
this->gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
|
||||
}
|
||||
|
||||
s32 GetPriorityLevel(s32 irq) const {
|
||||
return FromGicPriorityValue(this->gicd->ipriorityr.bytes[irq]);
|
||||
}
|
||||
|
||||
void SetPriorityLevel(s32 level) const {
|
||||
MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
|
||||
this->gicc->pmr = ToGicPriorityValue(level);
|
||||
}
|
||||
|
||||
void SetEdge(s32 irq) const {
|
||||
u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
|
||||
cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
|
||||
cfg |= (0x2 << (2 * (irq % (BITSIZEOF(u32) / 2))));
|
||||
this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
|
||||
}
|
||||
|
||||
void SetLevel(s32 irq) const {
|
||||
u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
|
||||
cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
|
||||
cfg |= (0x0 << (2 * (irq % (BITSIZEOF(u32) / 2))));
|
||||
this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
|
||||
}
|
||||
|
||||
void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
|
||||
MESOSPHERE_ASSERT(IsSoftware(irq));
|
||||
this->gicd->sgir = GetCpuTargetListMask(irq, core_mask);
|
||||
}
|
||||
|
||||
void SendInterProcessorInterrupt(s32 irq) {
|
||||
MESOSPHERE_ASSERT(IsSoftware(irq));
|
||||
this->gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
|
||||
}
|
||||
|
||||
void EndOfInterrupt(u32 irq) const {
|
||||
this->gicc->eoir = irq;
|
||||
}
|
||||
|
||||
/* Returns whether irq names an interrupt line implemented by this gic. */
bool IsInterruptDefined(s32 irq) {
    /* ITLinesNumber (typer bits 4:0) encodes the implemented line count as 32 * (N + 1). */
    const u32 it_lines_number = this->gicd->typer & 0x1F;
    const s32 num_interrupts  = std::min(32 + 32 * it_lines_number, static_cast<u32>(NumInterrupts));
    return (0 <= irq && irq < num_interrupts);
}
/* TODO: Implement more KInterruptController functionality. */
|
||||
public:
|
||||
/* Returns whether id names a software-generated interrupt (SGI). */
static constexpr ALWAYS_INLINE bool IsSoftware(s32 id) {
    MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
    return id < NumSoftwareInterrupts;
}
/* Returns whether id names a core-local interrupt (SGI or PPI). */
static constexpr ALWAYS_INLINE bool IsLocal(s32 id) {
    MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
    return id < NumLocalInterrupts;
}
/* Returns whether id names a global interrupt (SPI), shared by all cores. */
static constexpr ALWAYS_INLINE bool IsGlobal(s32 id) {
    MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
    return NumLocalInterrupts <= id;
}
/* Converts a global interrupt id into a zero-based index into global-interrupt tables. */
static constexpr size_t GetGlobalInterruptIndex(s32 id) {
    MESOSPHERE_ASSERT(IsGlobal(id));
    return id - NumLocalInterrupts;
}
/* Converts a core-local interrupt id into an index into local-interrupt tables; */
/* local ids start at zero, so the id is the index. */
static constexpr size_t GetLocalInterruptIndex(s32 id) {
    MESOSPHERE_ASSERT(IsLocal(id));
    return id;
}
private:
    /* Number of low bits in the 8-bit gic priority field that do not encode a level; */
    /* relies on NumPriorityLevels being a power of two. */
    static constexpr size_t PriorityShift = BITSIZEOF(u8) - __builtin_ctz(NumPriorityLevels);
    static_assert(PriorityShift < BITSIZEOF(u8));
/* Converts an abstract priority level to the gic's register encoding: the level */
/* occupies the top bits and all low (unused) bits are set. */
static constexpr ALWAYS_INLINE u8 ToGicPriorityValue(s32 level) {
    return (level << PriorityShift) | ((1 << PriorityShift) - 1);
}
/* Recovers the abstract priority level from a gic priority-register value. */
static constexpr ALWAYS_INLINE s32 FromGicPriorityValue(u8 priority) {
    return (priority >> PriorityShift) & (NumPriorityLevels - 1);
}
/* Builds the GICD_SGIR value that sends SGI irq to the cores selected by core_mask. */
static constexpr ALWAYS_INLINE s32 GetCpuTargetListMask(s32 irq, u64 core_mask) {
    MESOSPHERE_ASSERT(IsSoftware(irq));
    MESOSPHERE_ASSERT(core_mask < (1ul << cpu::NumCores));

    const auto target_list = static_cast<u16>(core_mask) << GicDistributor::SgirCpuTargetListShift;
    return GicDistributor::SgirTargetListFilter_CpuTargetList | irq | target_list;
}
/* Returns the gic cpu-target mask previously captured for core_id by SetGicMask(). */
static ALWAYS_INLINE s32 GetGicMask(s32 core_id) {
    return s_mask[core_id];
}
/* Captures the current core's gic target mask; for banked SGI/PPI lines, */
/* itargetsr.bytes[0] reads back as the mask of the accessing cpu. */
ALWAYS_INLINE void SetGicMask(s32 core_id) const {
    s_mask[core_id] = this->gicd->itargetsr.bytes[0];
}
NOINLINE void SetupInterruptLines(s32 core_id) const;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,130 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_spin_lock.hpp>
|
||||
#include <mesosphere/kern_k_interrupt_task.hpp>
|
||||
#include <mesosphere/kern_select_interrupt_controller.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
    /* Per-core interrupt management layer: binds/unbinds handlers to interrupt ids, */
    /* dispatches interrupts, and wraps DAIF-based interrupt masking for the current core. */
    class KInterruptManager {
        NON_COPYABLE(KInterruptManager);
        NON_MOVEABLE(KInterruptManager);
        private:
            /* Bookkeeping for a core-local (SGI/PPI) interrupt line. */
            struct KCoreLocalInterruptEntry {
                KInterruptHandler *handler;  /* Bound handler, or nullptr if unbound. */
                bool manually_cleared;       /* Owner clears via ClearInterrupt() rather than automatically. */
                bool needs_clear;            /* A manual clear is pending. */
                u8 priority;                 /* Priority level supplied at bind time. */

                constexpr KCoreLocalInterruptEntry()
                    : handler(nullptr), manually_cleared(false), needs_clear(false), priority(KInterruptController::PriorityLevel_Low)
                {
                    /* ... */
                }
            };

            /* Bookkeeping for a global (SPI) interrupt line, shared by all cores. */
            struct KGlobalInterruptEntry {
                KInterruptHandler *handler;
                bool manually_cleared;
                bool needs_clear;

                constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
            };
        private:
            /* Global interrupt state is shared across all cores and protected by s_lock. */
            static KSpinLock s_lock;
            static std::array<KGlobalInterruptEntry, KInterruptController::NumGlobalInterrupts> s_global_interrupts;
            static KInterruptController::GlobalState s_global_state;
            static bool s_global_state_saved;
        private:
            /* Per-core state. */
            KCoreLocalInterruptEntry core_local_interrupts[KInterruptController::NumLocalInterrupts];
            KInterruptController interrupt_controller;
            KInterruptController::LocalState local_state;
            bool local_state_saved;
        private:
            static ALWAYS_INLINE KSpinLock &GetLock() { return s_lock; }
            static ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return s_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
            ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return this->core_local_interrupts[KInterruptController::GetLocalInterruptIndex(irq)]; }

            bool OnHandleInterrupt();
        public:
            constexpr KInterruptManager() : core_local_interrupts(), interrupt_controller(), local_state(), local_state_saved(false) { /* ... */ }
            NOINLINE void Initialize(s32 core_id);
            NOINLINE void Finalize(s32 core_id);

            /* Forwards the query to the underlying interrupt controller. */
            bool IsInterruptDefined(s32 irq) {
                return this->interrupt_controller.IsInterruptDefined(irq);
            }

            NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
            NOINLINE Result UnbindHandler(s32 irq, s32 core);

            NOINLINE Result ClearInterrupt(s32 irq);
            NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);

            ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
                this->interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
            }

            ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
                this->interrupt_controller.SendInterProcessorInterrupt(irq);
            }

            static void HandleInterrupt(bool user_mode);

            /* TODO: Implement more KInterruptManager functionality. */
        private:
            Result BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
            Result BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear);
            Result UnbindGlobal(s32 irq);
            Result UnbindLocal(s32 irq);
            Result ClearGlobal(s32 irq);
            Result ClearLocal(s32 irq);
        public:
            /* Masks IRQs on the current core (sets DAIF.I) and returns the prior DAIF value. */
            static ALWAYS_INLINE u32 DisableInterrupts() {
                u64 intr_state;
                __asm__ __volatile__("mrs %[intr_state], daif\n"
                                     "msr daifset, #2"
                                     : [intr_state]"=r"(intr_state)
                                     :: "memory");
                return intr_state;
            }

            /* Unmasks IRQs on the current core (clears DAIF.I) and returns the prior DAIF value. */
            static ALWAYS_INLINE u32 EnableInterrupts() {
                u64 intr_state;
                __asm__ __volatile__("mrs %[intr_state], daif\n"
                                     "msr daifclr, #2"
                                     : [intr_state]"=r"(intr_state)
                                     :: "memory");
                return intr_state;
            }

            /* Restores only the IRQ mask bit (DAIF.I, bit 7) from a value returned by the above. */
            static ALWAYS_INLINE void RestoreInterrupts(u32 intr_state) {
                u64 cur_state;
                __asm__ __volatile__("mrs %[cur_state], daif" : [cur_state]"=r"(cur_state));
                __asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"((cur_state & ~0x80ul) | (intr_state & 0x80)));
            }

            /* Returns whether IRQs are currently unmasked (DAIF.I clear). */
            static ALWAYS_INLINE bool AreInterruptsEnabled() {
                u64 intr_state;
                __asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state));
                return (intr_state & 0x80) == 0;
            }
    };
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
namespace interrupt_name {
|
||||
|
||||
enum KInterruptName : s32 {
|
||||
/* SGIs */
|
||||
KInterruptName_ThreadTerminate = 4,
|
||||
KInterruptName_CacheOperation = 5,
|
||||
KInterruptName_Scheduler = 6,
|
||||
|
||||
KInterruptName_PerformanceCounter = 8,
|
||||
|
||||
/* PPIs */
|
||||
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
|
||||
KInterruptName_VirtualMaintenance = 25,
|
||||
KInterruptName_HypervisorTimer = 26,
|
||||
KInterruptName_VirtualTimer = 27,
|
||||
KInterruptName_LegacyNFiq = 38,
|
||||
KInterruptName_SecurePhysicalTimer = 29,
|
||||
KInterruptName_NonSecurePhysicalTimer = 30,
|
||||
KInterruptName_LegacyNIrq = 31,
|
||||
#endif
|
||||
|
||||
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
|
||||
KInterruptName_MemoryController = 109,
|
||||
#endif
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,275 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_page_table_base.hpp>
|
||||
#include <mesosphere/kern_k_page_group.hpp>
|
||||
#include <mesosphere/kern_k_page_table_manager.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
    /* Arm64 4KB-granule page table for kernel/process address spaces, layered on KPageTableBase. */
    class KPageTable : public KPageTableBase {
        NON_COPYABLE(KPageTable);
        NON_MOVEABLE(KPageTable);
        public:
            using TraversalEntry = KPageTableImpl::TraversalEntry;
            using TraversalContext = KPageTableImpl::TraversalContext;

            /* Mapping granularities, ordered smallest to largest; GetSmallerAlignment() */
            /* and GetLargerAlignment() rely on this ordering. */
            enum BlockType {
                BlockType_L3Block,
                BlockType_L3ContiguousBlock,
                BlockType_L2Block,

                #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
                BlockType_L2TegraSmmuBlock,
                #endif

                BlockType_L2ContiguousBlock,
                BlockType_L1Block,

                BlockType_Count,
            };
            static_assert(L3BlockSize == PageSize);
            static constexpr size_t ContiguousPageSize = L3ContiguousBlockSize;

            #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
            /* Nintendo NX only: a block size of two contiguous 2MB blocks. */
            static constexpr size_t L2TegraSmmuBlockSize = 2 * L2BlockSize;
            #endif
            /* Size in bytes of each block type (designated initializers keep this in sync with BlockType). */
            static constexpr size_t BlockSizes[BlockType_Count] = {
                [BlockType_L3Block] = L3BlockSize,
                [BlockType_L3ContiguousBlock] = L3ContiguousBlockSize,
                [BlockType_L2Block] = L2BlockSize,
                #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
                [BlockType_L2TegraSmmuBlock] = L2TegraSmmuBlockSize,
                #endif
                [BlockType_L2ContiguousBlock] = L2ContiguousBlockSize,
                [BlockType_L1Block] = L1BlockSize,
            };

            static constexpr BlockType GetMaxBlockType() {
                return BlockType_L1Block;
            }

            static constexpr size_t GetBlockSize(BlockType type) {
                return BlockSizes[type];
            }

            /* Inverse of GetBlockSize(); size must be exactly one of the supported block sizes. */
            static constexpr BlockType GetBlockType(size_t size) {
                switch (size) {
                    case L3BlockSize: return BlockType_L3Block;
                    case L3ContiguousBlockSize: return BlockType_L3ContiguousBlock;
                    case L2BlockSize: return BlockType_L2Block;
                    #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
                    case L2TegraSmmuBlockSize: return BlockType_L2TegraSmmuBlock;
                    #endif
                    case L2ContiguousBlockSize: return BlockType_L2ContiguousBlock;
                    case L1BlockSize: return BlockType_L1Block;
                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                }
            }

            /* Returns the next-smaller supported block size below alignment. */
            static constexpr size_t GetSmallerAlignment(size_t alignment) {
                MESOSPHERE_ASSERT(alignment > L3BlockSize);
                return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) - 1));
            }

            /* Returns the next-larger supported block size above alignment. */
            static constexpr size_t GetLargerAlignment(size_t alignment) {
                MESOSPHERE_ASSERT(alignment < L1BlockSize);
                return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
            }
        private:
            KPageTableManager *manager;  /* Allocator/refcounter for page-table pages. */
            u64 ttbr;                    /* Translation table base register value for this table. */
            u8 asid;                     /* Address space id, used for TLB invalidation. */
        protected:
            virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
            virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
            virtual void FinalizeUpdate(PageLinkedList *page_list) override;

            KPageTableManager &GetPageTableManager() const { return *this->manager; }
        private:
            /* Builds the descriptor attribute template for a mapping with the given properties. */
            constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
                /* Set basic attributes. */
                PageTableEntry entry;
                entry.SetPrivilegedExecuteNever(true);
                entry.SetAccessFlag(PageTableEntry::AccessFlag_Accessed);
                entry.SetShareable(PageTableEntry::Shareable_InnerShareable);

                /* Only kernel mappings are global. */
                if (!this->IsKernel()) {
                    entry.SetGlobal(false);
                }

                /* Set page attribute. */
                if (properties.io) {
                    /* I/O mappings must not be uncached-normal or executable. */
                    MESOSPHERE_ABORT_UNLESS(!properties.uncached);
                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);

                    entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE)
                         .SetUserExecuteNever(true);
                } else if (properties.uncached) {
                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);

                    entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable);
                } else {
                    entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory);
                }

                /* Set user execute never bit. */
                if (properties.perm != KMemoryPermission_UserReadExecute) {
                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
                    entry.SetUserExecuteNever(true);
                }

                /* Set can be contiguous. */
                entry.SetContiguousAllowed(!properties.non_contiguous);

                /* Set AP[1] based on perm. */
                switch (properties.perm & KMemoryPermission_UserReadWrite) {
                    case KMemoryPermission_UserReadWrite:
                    case KMemoryPermission_UserRead:
                        entry.SetUserAccessible(true);
                        break;
                    case KMemoryPermission_KernelReadWrite:
                    case KMemoryPermission_KernelRead:
                        entry.SetUserAccessible(false);
                        break;
                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                }

                /* Set AP[2] based on perm. */
                switch (properties.perm & KMemoryPermission_UserReadWrite) {
                    case KMemoryPermission_UserReadWrite:
                    case KMemoryPermission_KernelReadWrite:
                        entry.SetReadOnly(false);
                        break;
                    case KMemoryPermission_KernelRead:
                    case KMemoryPermission_UserRead:
                        entry.SetReadOnly(true);
                        break;
                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                }

                return entry;
            }
        public:
            constexpr KPageTable() : KPageTableBase(), manager(), ttbr(), asid() { /* ... */ }

            static NOINLINE void Initialize(s32 core_id);

            /* Makes this table the active one on the current core. */
            ALWAYS_INLINE void Activate(u32 proc_id) {
                cpu::DataSynchronizationBarrier();
                cpu::SwitchProcess(this->ttbr, proc_id);
            }

            NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
            NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager);
            Result Finalize();
        private:
            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
            Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);

            /* Map() variant that first applies the contiguous hint when page_size warrants it. */
            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
                switch (page_size) {
                    case L1BlockSize:
                    #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
                    case L2TegraSmmuBlockSize:
                    #endif
                    case L2BlockSize:
                    case L3BlockSize:
                        break;
                    case L2ContiguousBlockSize:
                    case L3ContiguousBlockSize:
                        entry_template.SetContiguous(true);
                        break;
                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                }
                return this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
            }

            Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
            Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);

            bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);

            ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
            Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);

            Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll);

            /* Barrier to ensure pte writes are visible before subsequent table walks. */
            static void PteDataSynchronizationBarrier() {
                cpu::DataSynchronizationBarrierInnerShareable();
            }

            static void ClearPageTable(KVirtualAddress table) {
                cpu::ClearPageToZero(GetVoidPointer(table));
            }

            /* TLB maintenance after modifying this (process) table. */
            void OnTableUpdated() const {
                cpu::InvalidateTlbByAsid(this->asid);
            }

            /* TLB maintenance after modifying the kernel table. */
            void OnKernelTableUpdated() const {
                cpu::InvalidateEntireTlbDataOnly();
            }

            void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
                cpu::InvalidateTlbByVaDataOnly(virt_addr);
            }

            void NoteUpdated() const {
                cpu::DataSynchronizationBarrier();

                if (this->IsKernel()) {
                    this->OnKernelTableUpdated();
                } else {
                    this->OnTableUpdated();
                }
            }

            void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
                MESOSPHERE_ASSERT(this->IsKernel());

                cpu::DataSynchronizationBarrier();
                this->OnKernelTableSinglePageUpdated(virt_addr);
            }

            /* Allocates and zeroes a new table page; on allocator exhaustion, may (when */
            /* reuse_ll is set) reuse a page previously freed onto page_list. */
            KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) const {
                KVirtualAddress table = this->GetPageTableManager().Allocate();

                if (table == Null<KVirtualAddress>) {
                    if (reuse_ll && page_list->Peek()) {
                        table = KVirtualAddress(reinterpret_cast<uintptr_t>(page_list->Pop()));
                    } else {
                        return Null<KVirtualAddress>;
                    }
                }

                ClearPageTable(table);

                MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0);

                return table;
            }

            /* Returns an empty table page to page_list for later freeing/reuse. */
            void FreePageTable(PageLinkedList *page_list, KVirtualAddress table) const {
                MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(table));
                MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0);
                page_list->Push(table);
            }
    };
|
||||
}
|
|
@ -0,0 +1,288 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
    /* 4KB-granule block sizes per translation level; a "contiguous block" is the */
    /* 16-entry span covered by the descriptor contiguous hint at that level. */
    constexpr size_t L1BlockSize           = 1_GB;
    constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;
    constexpr size_t L2BlockSize           = 2_MB;
    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
    constexpr size_t L3BlockSize           = PageSize;
    constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;
||||
    /* A single stage-1 translation table descriptor, stored as its raw 64-bit value. */
    class PageTableEntry {
        public:
            /* Tag type selecting the "invalid descriptor" constructor. */
            struct InvalidTag{};

            /* Pre-combined PXN (bit 53) / UXN (bit 54) / AP[2:1] (bits 7:6) encodings. */
            enum Permission : u64 {
                Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
                Permission_KernelRX  = ((0ul << 53) | (1ul << 54) | (2ul << 6)),
                Permission_KernelR   = ((1ul << 53) | (1ul << 54) | (2ul << 6)),
                Permission_KernelRW  = ((1ul << 53) | (1ul << 54) | (0ul << 6)),

                Permission_UserRX    = ((1ul << 53) | (0ul << 54) | (3ul << 6)),
                Permission_UserR     = ((1ul << 53) | (1ul << 54) | (3ul << 6)),
                Permission_UserRW    = ((1ul << 53) | (1ul << 54) | (1ul << 6)),
            };

            /* SH field (bits 9:8). */
            enum Shareable : u64 {
                Shareable_NonShareable   = (0 << 8),
                Shareable_OuterShareable = (2 << 8),
                Shareable_InnerShareable = (3 << 8),
            };

            /* AttrIndx field (bits 4:2), indexing MAIR attributes. */
            /* Official attributes are: */
            /* 0x00, 0x04, 0xFF, 0x44. 4-7 are unused. */
            enum PageAttribute : u64 {
                PageAttribute_Device_nGnRnE            = (0 << 2),
                PageAttribute_Device_nGnRE             = (1 << 2),
                PageAttribute_NormalMemory             = (2 << 2),
                PageAttribute_NormalMemoryNotCacheable = (3 << 2),
            };

            /* AF field (bit 10). */
            enum AccessFlag : u64 {
                AccessFlag_NotAccessed = (0 << 10),
                AccessFlag_Accessed    = (1 << 10),
            };

            /* Descriptor type bits (1:0). Note that an L3 "block" uses type 0x3. */
            enum Type : u64 {
                Type_None    = 0x0,
                Type_L1Block = 0x1,
                Type_L1Table = 0x3,
                Type_L2Block = 0x1,
                Type_L2Table = 0x3,
                Type_L3Block = 0x3,
            };

            /* Contiguous hint (bit 52). */
            enum ContigType : u64 {
                ContigType_NotContiguous = (0x0ul << 52),
                ContigType_Contiguous    = (0x1ul << 52),
            };
        protected:
            u64 attributes;  /* The raw descriptor value. */
        public:
            /* Take in a raw attribute. */
            constexpr ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
            constexpr ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }

            constexpr ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }

            /* Extend a previous attribute. */
            constexpr ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }

            /* Construct a new attribute. */
            constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share)
                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share))
            {
                /* ... */
            }
        protected:
            /* Extracts count bits starting at offset, shifted down to bit 0. */
            constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
                return (this->attributes >> offset) & ((1ul << count) - 1);
            }

            /* Extracts count bits starting at offset, kept in place. */
            constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
                return this->attributes & (((1ul << count) - 1) << offset);
            }

            /* Replaces the field at offset with value (value given unshifted). */
            constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
                const u64 mask = ((1ul << count) - 1) << offset;
                this->attributes &= ~mask;
                this->attributes |= (value & (mask >> offset)) << offset;
            }

            /* Replaces the field at offset with value (value given already in place). */
            constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
                const u64 mask = ((1ul << count) - 1) << offset;
                this->attributes &= ~mask;
                this->attributes |= (value & mask);
            }

            constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
                const u64 mask = 1ul << offset;
                if (enabled) {
                    this->attributes |= mask;
                } else {
                    this->attributes &= ~mask;
                }
            }
        public:
            /* NOTE(review): SetContiguousAllowed() stores the INVERTED flag in bit 55, but this */
            /* accessor reads the bit without inverting — confirm the intended polarity of bit 55. */
            constexpr ALWAYS_INLINE bool IsContiguousAllowed()       const { return this->GetBits(55, 1) != 0; }
            constexpr ALWAYS_INLINE bool IsUserExecuteNever()        const { return this->GetBits(54, 1) != 0; }
            constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever()  const { return this->GetBits(53, 1) != 0; }
            constexpr ALWAYS_INLINE bool IsContiguous()              const { return this->GetBits(52, 1) != 0; }
            /* Bit 11 is nG ("not global"), hence the inverted comparison. */
            constexpr ALWAYS_INLINE bool IsGlobal()                  const { return this->GetBits(11, 1) == 0; }
            constexpr ALWAYS_INLINE AccessFlag GetAccessFlag()       const { return static_cast<AccessFlag>(this->GetBits(10, 1)); }
            constexpr ALWAYS_INLINE Shareable GetShareable()         const { return static_cast<Shareable>(this->GetBits(8, 2)); }
            constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); }
            constexpr ALWAYS_INLINE bool IsReadOnly()                const { return this->GetBits(7, 1) != 0; }
            constexpr ALWAYS_INLINE bool IsUserAccessible()          const { return this->GetBits(6, 1) != 0; }
            constexpr ALWAYS_INLINE bool IsNonSecure()               const { return this->GetBits(5, 1) != 0; }
            constexpr ALWAYS_INLINE bool IsBlock()                   const { return this->GetBits(0, 2) == 0x1; }
            constexpr ALWAYS_INLINE bool IsTable()                   const { return this->GetBits(0, 2) == 0x3; }

            constexpr ALWAYS_INLINE decltype(auto) SetContiguousAllowed(bool en)      { this->SetBit(55, !en); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en)       { this->SetBit(54, en); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetPrivilegedExecuteNever(bool en) { this->SetBit(53, en); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetContiguous(bool en)             { this->SetBit(52, en); return *this; }
            /* Bit 11 is nG, so "global" stores the inverse. */
            constexpr ALWAYS_INLINE decltype(auto) SetGlobal(bool en)                 { this->SetBit(11, !en); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetAccessFlag(AccessFlag f)        { this->SetBitsDirect(10, 1, f); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetShareable(Shareable s)          { this->SetBitsDirect(8, 2, s); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetReadOnly(bool en)               { this->SetBit(7, en); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetUserAccessible(bool en)         { this->SetBit(6, en); return *this; }
            constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a)  { this->SetBitsDirect(2, 3, a); return *this; }

            /* Returns only the attribute bits: the high/low attribute fields minus */
            /* the descriptor type bits and the contiguous hint. */
            constexpr ALWAYS_INLINE u64 GetEntryTemplate() const {
                constexpr u64 Mask = (0xFFF0000000000FFFul & ~u64(0x3ul | (0x1ul << 52)));
                return this->attributes & Mask;
            }

            /* Exact raw-value comparison. */
            constexpr ALWAYS_INLINE bool Is(u64 attr) const {
                return this->attributes == attr;
            }

        protected:
            constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
                return this->attributes;
            }
    };
|
||||
    static_assert(sizeof(PageTableEntry) == sizeof(u64));

    /* Canonical invalid (unmapped) descriptor. */
    constexpr inline PageTableEntry InvalidPageTableEntry = PageTableEntry(PageTableEntry::InvalidTag{});

    /* With a 4KB granule, each table page holds PageSize / 8 descriptors. */
    constexpr inline size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry);
|
||||
    /* Level 1 descriptor: either a 1GB block mapping or a pointer to an L2 table. */
    class L1PageTableEntry : public PageTableEntry {
        public:
            constexpr ALWAYS_INLINE L1PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }

            /* Table descriptor (type 0x3) with hierarchical execute/access controls in bits 59-61. */
            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
            {
                /* ... */
            }

            /* Table descriptor; the bit-60/61 controls are applied only for kernel tables. */
            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
            {
                /* ... */
            }

            /* 1GB block descriptor (type 0x1) with the given attributes and contiguous hint. */
            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
            {
                /* ... */
            }

            /* Output address of a 1GB block: bits 47:30. */
            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
                return this->SelectBits(30, 18);
            }

            /* Output address of the next-level table: bits 47:12. */
            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
                return this->SelectBits(12, 36);
            }

            /* Writes the table address to out and returns true iff this is a table descriptor. */
            constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const {
                if (this->IsTable()) {
                    out = this->GetTable();
                    return true;
                } else {
                    return false;
                }
            }

            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
                /* Check whether this has the same permission/etc as the desired attributes. */
                return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
            }
    };
|
||||
    /* Level 2 descriptor: either a 2MB block mapping or a pointer to an L3 table. */
    class L2PageTableEntry : public PageTableEntry {
        public:
            constexpr ALWAYS_INLINE L2PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }

            /* Table descriptor (type 0x3) with hierarchical execute/access controls in bits 59-61. */
            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
            {
                /* ... */
            }

            /* Table descriptor; the bit-60/61 controls are applied only for kernel tables. */
            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
            {
                /* ... */
            }

            /* 2MB block descriptor (type 0x1) with the given attributes and contiguous hint. */
            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
            {
                /* ... */
            }

            /* Output address of a 2MB block: bits 47:21. */
            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
                return this->SelectBits(21, 27);
            }

            /* Output address of the next-level table: bits 47:12. */
            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
                return this->SelectBits(12, 36);
            }

            /* Writes the table address to out and returns true iff this is a table descriptor. */
            constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const {
                if (this->IsTable()) {
                    out = this->GetTable();
                    return true;
                } else {
                    return false;
                }
            }

            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
                /* Check whether this has the same permission/etc as the desired attributes. */
                return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
            }
    };
|
||||
/* Software wrapper around an AArch64 level 3 translation table descriptor,
   which maps a single page (PageSize granularity). */
class L3PageTableEntry : public PageTableEntry {
    public:
        /* Create an invalid (unmapped) entry. */
        constexpr ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }

        /* Create a page descriptor (low bits 0b11) mapping phys_addr with the
           given attributes; bit 52 marks the entry as part of a contiguous run. */
        constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
            : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
        {
            /* ... */
        }

        /* At level 3 the 0b11 low-bit pattern denotes a page mapping (unlike
           L1/L2, where 0b11 denotes a table descriptor). */
        constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; }

        /* Physical address of the mapped page (bits 12-47). */
        constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
            return this->SelectBits(12, 36);
        }

        constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
            /* Check whether this has the same permission/etc as the desired attributes,
               by rebuilding an entry for the same page with the desired attributes
               and comparing raw bits. */
            return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
        }
};
|
||||
|
||||
/* Canonical invalid (unmapped) entry values for each translation level. */
constexpr inline L1PageTableEntry InvalidL1PageTableEntry = L1PageTableEntry(PageTableEntry::InvalidTag{});
constexpr inline L2PageTableEntry InvalidL2PageTableEntry = L2PageTableEntry(PageTableEntry::InvalidTag{});
constexpr inline L3PageTableEntry InvalidL3PageTableEntry = L3PageTableEntry(PageTableEntry::InvalidTag{});
|
||||
|
||||
}
|
|
@ -0,0 +1,115 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
#include <mesosphere/kern_k_memory_layout.hpp>
|
||||
#include <mesosphere/arch/arm64/kern_k_page_table_entry.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
class KPageTableImpl {
|
||||
NON_COPYABLE(KPageTableImpl);
|
||||
NON_MOVEABLE(KPageTableImpl);
|
||||
public:
|
||||
struct TraversalEntry {
|
||||
KPhysicalAddress phys_addr;
|
||||
size_t block_size;
|
||||
};
|
||||
|
||||
struct TraversalContext {
|
||||
const L1PageTableEntry *l1_entry;
|
||||
const L2PageTableEntry *l2_entry;
|
||||
const L3PageTableEntry *l3_entry;
|
||||
};
|
||||
private:
|
||||
static constexpr size_t PageBits = __builtin_ctzll(PageSize);
|
||||
static constexpr size_t NumLevels = 3;
|
||||
static constexpr size_t LevelBits = 9;
|
||||
static_assert(NumLevels > 0);
|
||||
|
||||
template<size_t Offset, size_t Count>
|
||||
static constexpr ALWAYS_INLINE u64 GetBits(u64 value) {
|
||||
return (value >> Offset) & ((1ul << Count) - 1);
|
||||
}
|
||||
|
||||
template<size_t Offset, size_t Count>
|
||||
constexpr ALWAYS_INLINE u64 SelectBits(u64 value) {
|
||||
return value & (((1ul << Count) - 1) << Offset);
|
||||
}
|
||||
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetL0Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 0), LevelBits>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetL1Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 1), LevelBits>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 2), LevelBits>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetL3Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 3), LevelBits>(GetInteger(addr)); }
|
||||
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1)>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2)>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3)>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1) + 4>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); }
|
||||
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); }
|
||||
|
||||
static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
|
||||
return KMemoryLayout::GetLinearVirtualAddress(addr);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
|
||||
ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
|
||||
ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
|
||||
private:
|
||||
L1PageTableEntry *table;
|
||||
bool is_kernel;
|
||||
u32 num_entries;
|
||||
public:
|
||||
ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) const {
|
||||
return table + index * sizeof(PageTableEntry);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) const {
|
||||
return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(this->table), GetL1Index(address) & (this->num_entries - 1)));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
|
||||
return GetPointer<L2PageTableEntry>(GetTableEntry(table, GetL2Index(address)));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KProcessAddress address) const {
|
||||
return GetL2EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE L3PageTableEntry *GetL3EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
|
||||
return GetPointer<L3PageTableEntry>(GetTableEntry(table, GetL3Index(address)));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const {
|
||||
return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
|
||||
}
|
||||
public:
|
||||
constexpr KPageTableImpl() : table(), is_kernel(), num_entries() { /* ... */ }
|
||||
|
||||
NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
|
||||
NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
|
||||
L1PageTableEntry *Finalize();
|
||||
|
||||
bool BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const;
|
||||
bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
|
||||
|
||||
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/arch/arm64/kern_k_page_table.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
/* A process's address space: thin forwarding wrapper over the architectural
   KPageTable, exposing only the operations the generic kernel needs. */
class KProcessPageTable {
    private:
        KPageTable page_table;
    public:
        constexpr KProcessPageTable() : page_table() { /* ... */ }

        void Activate(u64 id) {
            /* Activate the page table with the specified contextidr. */
            this->page_table.Activate(id);
        }

        Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
            return this->page_table.InitializeForProcess(id, as_type, enable_aslr, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
        }

        void Finalize() { this->page_table.Finalize(); }

        /* Permission management. */
        Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
            return this->page_table.SetMemoryPermission(addr, size, perm);
        }

        Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
            return this->page_table.SetProcessMemoryPermission(addr, size, perm);
        }

        /* Heap management. */
        Result SetHeapSize(KProcessAddress *out, size_t size) {
            return this->page_table.SetHeapSize(out, size);
        }

        Result SetMaxHeapSize(size_t size) {
            return this->page_table.SetMaxHeapSize(size);
        }

        Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
            return this->page_table.QueryInfo(out_info, out_page_info, addr);
        }

        /* Mapping and unmapping. */
        Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
            return this->page_table.MapIo(phys_addr, size, perm);
        }

        Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
            return this->page_table.MapStatic(phys_addr, size, perm);
        }

        Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
            return this->page_table.MapRegion(region_type, perm);
        }

        Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
            return this->page_table.MapPageGroup(addr, pg, state, perm);
        }

        Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
            return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
        }

        Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
            return this->page_table.MapPages(out_addr, num_pages, state, perm);
        }

        Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
            return this->page_table.UnmapPages(addr, num_pages, state);
        }

        /* Queries. */
        bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
            return this->page_table.GetPhysicalAddress(out, address);
        }

        bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); }
        bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); }

        /* Region layout accessors. */
        KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); }
        KProcessAddress GetHeapRegionStart() const { return this->page_table.GetHeapRegionStart(); }
        KProcessAddress GetAliasRegionStart() const { return this->page_table.GetAliasRegionStart(); }
        KProcessAddress GetStackRegionStart() const { return this->page_table.GetStackRegionStart(); }
        KProcessAddress GetKernelMapRegionStart() const { return this->page_table.GetKernelMapRegionStart(); }

        size_t GetAddressSpaceSize() const { return this->page_table.GetAddressSpaceSize(); }
        size_t GetHeapRegionSize() const { return this->page_table.GetHeapRegionSize(); }
        size_t GetAliasRegionSize() const { return this->page_table.GetAliasRegionSize(); }
        size_t GetStackRegionSize() const { return this->page_table.GetStackRegionSize(); }
        size_t GetKernelMapRegionSize() const { return this->page_table.GetKernelMapRegionSize(); }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,111 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
class KNotAlignedSpinLock {
|
||||
private:
|
||||
u32 packed_tickets;
|
||||
public:
|
||||
constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ }
|
||||
|
||||
void Lock() {
|
||||
u32 tmp0, tmp1;
|
||||
|
||||
__asm__ __volatile__(
|
||||
" prfm pstl1keep, %[packed_tickets]\n"
|
||||
"1:\n"
|
||||
" ldaxr %w[tmp0], %[packed_tickets]\n"
|
||||
" add %w[tmp0], %w[tmp0], #0x10000\n"
|
||||
" stxr %w[tmp1], %w[tmp0], %[packed_tickets]\n"
|
||||
" cbnz %w[tmp1], 1b\n"
|
||||
" \n"
|
||||
" and %w[tmp1], %w[tmp0], #0xFFFF\n"
|
||||
" cmp %w[tmp1], %w[tmp0], lsr #16\n"
|
||||
" b.eq done"
|
||||
" sevl\n"
|
||||
"2:\n"
|
||||
" wfe\n"
|
||||
" ldaxrh %w[tmp1], %[packed_tickets]\n"
|
||||
" cmp %w[tmp1], %w[tmp0], lsr #16\n"
|
||||
" b.ne 2b\n"
|
||||
"done:\n"
|
||||
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets)
|
||||
:
|
||||
: "cc", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
void Unlock() {
|
||||
const u32 value = this->packed_tickets + 1;
|
||||
__asm__ __volatile__(
|
||||
" stlrh %w[value], %[packed_tickets]\n"
|
||||
: [packed_tickets]"+Q"(this->packed_tickets)
|
||||
: [value]"r"(value)
|
||||
: "memory"
|
||||
);
|
||||
}
|
||||
};
|
||||
static_assert(sizeof(KNotAlignedSpinLock) == sizeof(u32));
|
||||
|
||||
/* Classic ticket spinlock with the two counters placed on separate data cache
   lines to avoid false sharing between cores taking tickets and the core
   releasing the lock. */
class KAlignedSpinLock {
    private:
        alignas(cpu::DataCacheLineSize) u16 current_ticket; /* Now-serving counter. */
        alignas(cpu::DataCacheLineSize) u16 next_ticket;    /* Next ticket to hand out. */
    public:
        constexpr KAlignedSpinLock() : current_ticket(0), next_ticket(0) { /* ... */ }

        void Lock() {
            u32 tmp0, tmp1, got_lock;

            /* Atomically take a ticket from next_ticket, then wait until
               current_ticket reaches it. The initial sevl sets the event
               register so the first wfe falls through and the ticket is
               checked at least once before sleeping. */
            __asm__ __volatile__(
                " prfm pstl1keep, %[next_ticket]\n"
                "1:\n"
                " ldaxrh %w[tmp0], %[next_ticket]\n"
                " add %w[tmp1], %w[tmp0], #0x1\n"
                " stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n"
                " cbnz %w[got_lock], 1b\n"
                " \n"
                " sevl\n"
                "2:\n"
                " wfe\n"
                " ldaxrh %w[tmp1], %[current_ticket]\n"
                " cmp %w[tmp1], %w[tmp0]\n"
                " b.ne 2b\n"
                : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket)
                : [current_ticket]"Q"(this->current_ticket)
                : "cc", "memory"
            );
        }

        void Unlock() {
            /* Advance the now-serving counter with release semantics,
               admitting the next waiter. */
            const u32 value = this->current_ticket + 1;
            __asm__ __volatile__(
                " stlrh %w[value], %[current_ticket]\n"
                : [current_ticket]"+Q"(this->current_ticket)
                : [value]"r"(value)
                : "memory"
            );
        }
};
static_assert(sizeof(KAlignedSpinLock) == 2 * cpu::DataCacheLineSize);

/* The kernel's default spinlock type. */
using KSpinLock = KAlignedSpinLock;
|
||||
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/arch/arm64/kern_k_page_table.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
/* The kernel's own page table, shared by all cores, plus a per-core record of
   TTBR0 values. */
class KSupervisorPageTable {
    private:
        KPageTable page_table;
        /* One slot per core -- NOTE(review): presumably each core's TTBR0 is
           saved here by Initialize(); confirm in the implementation. */
        u64 ttbr0[cpu::NumCores];
    public:
        constexpr KSupervisorPageTable() : page_table(), ttbr0() { /* ... */ }

        NOINLINE void Initialize(s32 core_id);

        void Activate() {
            /* Activate, using process id = 0xFFFFFFFF */
            this->page_table.Activate(0xFFFFFFFF);
        }

        void ActivateForInit() {
            this->Activate();

            /* Invalidate entire TLB. */
            cpu::InvalidateEntireTlb();
        }

        void Finalize(s32 core_id);

        /* Thin forwarders to the underlying KPageTable. */
        Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
            return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
        }

        Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
            return this->page_table.UnmapPages(address, num_pages, state);
        }

        Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
            return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
        }

        Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
            return this->page_table.UnmapPageGroup(address, pg, state);
        }

        /* Translate a kernel virtual address; returns whether it is mapped. */
        bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
            return this->page_table.GetPhysicalAddress(out, address);
        }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KThread;
|
||||
|
||||
}
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
/* Architectural thread context: the callee-saved state preserved across a
   context switch on aarch64. */
class KThreadContext {
    public:
        /* x19-x29 inclusive. */
        static constexpr size_t NumCalleeSavedRegisters = (29 - 19) + 1;
        static constexpr size_t NumFpuRegisters = 32;
    private:
        /* Callee-saved general purpose registers, addressable either by index
           or by architectural name. */
        union {
            u64 registers[NumCalleeSavedRegisters];
            struct {
                u64 x19;
                u64 x20;
                u64 x21;
                u64 x22;
                u64 x23;
                u64 x24;
                u64 x25;
                u64 x26;
                u64 x27;
                u64 x28;
                u64 x29;
            };
        } callee_saved;
        u64 lr;     /* Link register (x30). */
        u64 sp;     /* Stack pointer. */
        u64 cpacr;  /* Architectural FPU/SIMD access control register value. */
        u64 fpcr;   /* Floating point control register. */
        u64 fpsr;   /* Floating point status register. */
        alignas(0x10) u128 fpu_registers[NumFpuRegisters];  /* q0-q31. */
        bool locked;
    private:
        static void RestoreFpuRegisters64(const KThreadContext &);
        static void RestoreFpuRegisters32(const KThreadContext &);
    public:
        constexpr explicit KThreadContext() : callee_saved(), lr(), sp(), cpacr(), fpcr(), fpsr(), fpu_registers(), locked() { /* ... */ }

        Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main);
        Result Finalize();

        void SetArguments(uintptr_t arg0, uintptr_t arg1);

        static void FpuContextSwitchHandler(KThread *thread);

        /* TODO: More methods (especially FPU management) */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern::arch::arm64 {
|
||||
|
||||
/* Marker delimiting the start of the text region containing userspace access
   routines -- NOTE(review): presumably so faults raised within the region can
   be identified and recovered by the exception handler; confirm there. */
void UserspaceAccessFunctionAreaBegin();

/* Primitives for accessing user memory from the kernel. The bool-returning
   functions report whether the access completed (presumably false on fault --
   confirm against the asm implementations); CopyString* return an s32. */
class UserspaceAccess {
    public:
        /* Copies from user memory into kernel memory. */
        static bool CopyMemoryFromUser(void *dst, const void *src, size_t size);
        static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size);
        static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size);
        static bool CopyMemoryFromUserSize32Bit(void *dst, const void *src);
        static s32 CopyStringFromUser(void *dst, const void *src, size_t size);

        /* Copies from kernel memory into user memory. */
        static bool CopyMemoryToUser(void *dst, const void *src, size_t size);
        static bool CopyMemoryToUserAligned32Bit(void *dst, const void *src, size_t size);
        static bool CopyMemoryToUserAligned64Bit(void *dst, const void *src, size_t size);
        static bool CopyMemoryToUserSize32Bit(void *dst, const void *src);
        static s32 CopyStringToUser(void *dst, const void *src, size_t size);

        /* Zero-fills user memory. */
        static bool ClearMemory(void *dst, size_t size);
        static bool ClearMemoryAligned32Bit(void *dst, size_t size);
        static bool ClearMemoryAligned64Bit(void *dst, size_t size);
        static bool ClearMemorySize32Bit(void *dst);

        /* Cache maintenance over a user address range. */
        static bool StoreDataCache(uintptr_t start, uintptr_t end);
        static bool FlushDataCache(uintptr_t start, uintptr_t end);
        static bool InvalidateDataCache(uintptr_t start, uintptr_t end);
        static bool InvalidateInstructionCache(uintptr_t start, uintptr_t end);

        /* Width-exact accesses to memory-mapped I/O. */
        static bool ReadIoMemory32Bit(void *dst, const void *src, size_t size);
        static bool ReadIoMemory16Bit(void *dst, const void *src, size_t size);
        static bool ReadIoMemory8Bit(void *dst, const void *src, size_t size);
        static bool WriteIoMemory32Bit(void *dst, const void *src, size_t size);
        static bool WriteIoMemory16Bit(void *dst, const void *src, size_t size);
        static bool WriteIoMemory8Bit(void *dst, const void *src, size_t size);
};


/* Marker delimiting the end of the userspace access text region. */
void UserspaceAccessFunctionAreaEnd();
|
||||
|
||||
}
|
|
@ -0,0 +1,59 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_page_group.hpp>
|
||||
#include <mesosphere/kern_k_memory_manager.hpp>
|
||||
#include <mesosphere/kern_select_page_table.hpp>
|
||||
|
||||
namespace ams::kern::board::nintendo::nx {
|
||||
|
||||
/* Virtual address as seen by a DMA-capable device. */
using KDeviceVirtualAddress = u64;

/* Board-specific (Nintendo NX) device address translation tables --
   NOTE(review): presumably backing the Tegra SMMU; confirm in the
   implementation. */
class KDevicePageTable {
    private:
        static constexpr size_t TableCount = 4;
    private:
        KVirtualAddress tables[TableCount];
        u8 table_asids[TableCount];  /* ASID associated with each table -- confirm semantics in the .cpp. */
        u64 attached_device;
        u32 attached_value;
        u32 detached_value;
        u32 hs_attached_value;
        u32 hs_detached_value;
    private:
        /* Address translation helpers, forwarded from the kernel KPageTable. */
        static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
            return KPageTable::GetHeapVirtualAddress(addr);
        }

        static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
            return KPageTable::GetHeapPhysicalAddress(addr);
        }

        static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
            return KPageTable::GetPageTableVirtualAddress(addr);
        }

        static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) {
            return KPageTable::GetPageTablePhysicalAddress(addr);
        }
    public:
        constexpr KDevicePageTable() : tables(), table_asids(), attached_device(), attached_value(), detached_value(), hs_attached_value(), hs_detached_value() { /* ... */ }

        /* One-time global initialization of the device translation hardware. */
        static void Initialize();
};
|
||||
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern::board::nintendo::nx {
|
||||
|
||||
/* Board-specific (Nintendo NX) system control: memory layout parameters,
   randomness, privileged register access, and power management. */
class KSystemControl {
    public:
        /* Subset of the API that is safe to call during early kernel init. */
        class Init {
            public:
                /* Initialization. */
                static size_t GetIntendedMemorySize();
                static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
                static bool ShouldIncreaseThreadResourceLimit();
                static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                static size_t GetApplicationPoolSize();
                static size_t GetAppletPoolSize();
                static size_t GetMinimumNonSecureSystemPoolSize();

                /* Randomness. */
                static void GenerateRandomBytes(void *dst, size_t size);
                static u64 GenerateRandomRange(u64 min, u64 max);
        };
    public:
        /* Initialization. */
        static NOINLINE void InitializePhase1();
        static NOINLINE void InitializePhase2();
        static NOINLINE u32 GetInitialProcessBinaryPool();

        /* Randomness. */
        static void GenerateRandomBytes(void *dst, size_t size);
        static u64 GenerateRandomRange(u64 min, u64 max);

        /* Privileged Access. */
        static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
        static void ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);

        /* Pure read: an all-zero mask writes nothing back. */
        static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
            u32 v;
            ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
            return v;
        }

        /* Full write: an all-ones mask replaces the whole register. */
        static ALWAYS_INLINE void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
            u32 v;
            ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
        }

        /* Power management. */
        static void SleepSystem();
        static NORETURN void StopSystem();
};
|
||||
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
|
||||
#ifdef ATMOSPHERE_ARCH_ARM64
|
||||
#include <mesosphere/arch/arm64/init/kern_k_init_arguments.hpp>
|
||||
#else
|
||||
#error "Unknown architecture for KInitArguments"
|
||||
#endif
|
||||
|
||||
namespace ams::kern::init {

    /* Accessors for the per-core argument blocks handed to each core at boot. */
    KPhysicalAddress GetInitArgumentsAddress(s32 core_id);
    void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg);
    void StoreInitArguments();

}
|
|
@ -17,23 +17,21 @@
|
|||
#include <vapours.hpp>
|
||||
|
||||
#ifdef ATMOSPHERE_ARCH_ARM64
|
||||
#include "kern_init_elf64.hpp"
|
||||
#include <mesosphere/init/kern_init_elf64.hpp>
|
||||
|
||||
namespace ams::kern::init::Elf {
|
||||
using namespace ams::kern::init::Elf::Elf64;
|
||||
|
||||
enum RelocationType {
|
||||
R_ARCHITECTURE_RELATIVE = 0x403, /* Real name R_AARCH64_RELATIVE */
|
||||
};
|
||||
}
|
||||
#else
|
||||
#error "Unknown Architecture"
|
||||
#endif
|
||||
|
||||
namespace ams::kern::init::Elf {
|
||||
|
||||
#ifdef ATMOSPHERE_ARCH_ARM64
|
||||
using namespace ams::kern::init::Elf::Elf64;
|
||||
|
||||
enum RelocationType {
|
||||
R_ARCHITECTURE_RELATIVE = 0x403, /* Real name R_AARCH64_RELATIVE */
|
||||
};
|
||||
#else
|
||||
#error "Unknown Architecture"
|
||||
#endif
|
||||
|
||||
/* API to apply relocations or call init array. */
|
||||
void ApplyRelocations(uintptr_t base_address, const Dyn *dynamic);
|
||||
void CallInitArrayFuncs(uintptr_t init_array_start, uintptr_t init_array_end);
|
||||
|
|
|
@ -27,7 +27,7 @@ namespace ams::kern::init {
|
|||
u32 rw_end_offset;
|
||||
u32 bss_offset;
|
||||
u32 bss_end_offset;
|
||||
u32 ini_end_offset;
|
||||
u32 ini_load_offset;
|
||||
u32 dynamic_offset;
|
||||
u32 init_array_offset;
|
||||
u32 init_array_end_offset;
|
||||
|
|
|
@ -14,9 +14,16 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
#ifdef ATMOSPHERE_ARCH_ARM64
|
||||
#include "../arch/arm64/init/kern_k_init_page_table.hpp"
|
||||
#include <mesosphere/arch/arm64/init/kern_k_init_page_table.hpp>
|
||||
|
||||
namespace ams::kern::init {
|
||||
using ams::kern::arch::arm64::PageTableEntry;
|
||||
using ams::kern::arch::arm64::init::KInitialPageTable;
|
||||
using ams::kern::arch::arm64::init::KInitialPageAllocator;
|
||||
}
|
||||
#else
|
||||
#error "Unknown architecture for KInitialPageTable"
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include <mesosphere/kern_k_slab_heap.hpp>
|
||||
|
||||
namespace ams::kern::init {
|
||||
|
||||
struct KSlabResourceCounts {
|
||||
size_t num_KProcess;
|
||||
size_t num_KThread;
|
||||
size_t num_KEvent;
|
||||
size_t num_KInterruptEvent;
|
||||
size_t num_KPort;
|
||||
size_t num_KSharedMemory;
|
||||
size_t num_KTransferMemory;
|
||||
size_t num_KCodeMemory;
|
||||
size_t num_KDeviceAddressSpace;
|
||||
size_t num_KSession;
|
||||
size_t num_KLightSession;
|
||||
size_t num_KObjectName;
|
||||
size_t num_KResourceLimit;
|
||||
size_t num_KDebug;
|
||||
};
|
||||
|
||||
NOINLINE void InitializeSlabResourceCounts();
|
||||
const KSlabResourceCounts &GetSlabResourceCounts();
|
||||
|
||||
size_t CalculateTotalSlabHeapSize();
|
||||
NOINLINE void InitializeKPageBufferSlabHeap();
|
||||
NOINLINE void InitializeSlabHeaps();
|
||||
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
constexpr size_t PageSize = 4_KB;
|
||||
|
||||
}
|
||||
|
||||
#if 1
|
||||
#define MESOSPHERE_BUILD_FOR_AUDITING
|
||||
#endif
|
||||
|
||||
#ifdef MESOSPHERE_BUILD_FOR_AUDITING
|
||||
#define MESOSPHERE_BUILD_FOR_DEBUGGING
|
||||
#endif
|
||||
|
||||
#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
|
||||
#define MESOSPHERE_ENABLE_ASSERTIONS
|
||||
#define MESOSPHERE_ENABLE_DEBUG_PRINT
|
||||
#endif
|
||||
|
||||
#include <mesosphere/svc/kern_svc_results.hpp>
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KDebugLog {
|
||||
private:
|
||||
static NOINLINE void VSNPrintf(char *dst, const size_t dst_size, const char *format, ::std::va_list vl);
|
||||
public:
|
||||
static NOINLINE void Initialize();
|
||||
|
||||
static NOINLINE void Printf(const char *format, ...) __attribute__((format(printf, 1, 2)));
|
||||
static NOINLINE void VPrintf(const char *format, ::std::va_list vl);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#ifndef MESOSPHERE_DEBUG_LOG_SELECTED
|
||||
|
||||
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
|
||||
#define MESOSPHERE_DEBUG_LOG_USE_UART_C
|
||||
#else
|
||||
#error "Unknown board for Default Debug Log Source"
|
||||
#endif
|
||||
|
||||
#define MESOSPHERE_DEBUG_LOG_SELECTED
|
||||
|
||||
#endif
|
||||
|
||||
#define MESOSPHERE_RELEASE_LOG(fmt, ...) ::ams::kern::KDebugLog::Printf((fmt), ## __VA_ARGS__)
|
||||
#define MESOSPHERE_RELEASE_VLOG(fmt, vl) ::ams::kern::KDebugLog::VPrintf((fmt), (vl))
|
||||
|
||||
#ifdef MESOSPHERE_ENABLE_DEBUG_PRINT
|
||||
#define MESOSPHERE_LOG(fmt, ...) MESOSPHERE_RELEASE_LOG((fmt), ## __VA_ARGS__)
|
||||
#define MESOSPHERE_VLOG(fmt, vl) MESOSPHERE_RELEASE_VLOG((fmt), (vl))
|
||||
#else
|
||||
#define MESOSPHERE_LOG(fmt, ...) do { MESOSPHERE_UNUSED(fmt); MESOSPHERE_UNUSED(__VA_ARGS__); } while (0)
|
||||
#define MESOSPHERE_VLOG(fmt, vl) do { MESOSPHERE_UNUSED(fmt); MESOSPHERE_UNUSED(vl); } while (0)
|
||||
#endif
|
|
@ -14,8 +14,8 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include "kern_panic.hpp"
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_initial_process_reader.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
|
@ -29,4 +29,10 @@ namespace ams::kern {
|
|||
u32 reserved;
|
||||
};
|
||||
|
||||
NOINLINE void CopyInitialProcessBinaryToKernelMemory();
|
||||
NOINLINE void CreateAndRunInitialProcesses();
|
||||
|
||||
u64 GetInitialProcessIdMin();
|
||||
u64 GetInitialProcessIdMax();
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_condition_variable.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KAddressArbiter {
|
||||
public:
|
||||
using ThreadTree = KConditionVariable::ThreadTree;
|
||||
private:
|
||||
ThreadTree tree;
|
||||
public:
|
||||
constexpr KAddressArbiter() : tree() { /* ... */ }
|
||||
|
||||
Result SignalToAddress(uintptr_t addr, ams::svc::SignalType type, s32 value, s32 count) {
|
||||
switch (type) {
|
||||
case ams::svc::SignalType_Signal:
|
||||
return this->Signal(addr, count);
|
||||
case ams::svc::SignalType_SignalAndIncrementIfEqual:
|
||||
return this->SignalAndIncrementIfEqual(addr, value, count);
|
||||
case ams::svc::SignalType_SignalAndModifyByWaitingCountIfEqual:
|
||||
return this->SignalAndModifyByWaitingCountIfEqual(addr, value, count);
|
||||
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
|
||||
}
|
||||
}
|
||||
|
||||
Result WaitForAddress(uintptr_t addr, ams::svc::ArbitrationType type, s32 value, s64 timeout) {
|
||||
switch (type) {
|
||||
case ams::svc::ArbitrationType_WaitIfLessThan:
|
||||
return this->WaitIfLessThan(addr, value, false, timeout);
|
||||
case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan:
|
||||
return this->WaitIfLessThan(addr, value, true, timeout);
|
||||
case ams::svc::ArbitrationType_WaitIfEqual:
|
||||
return this->WaitIfEqual(addr, value, timeout);
|
||||
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
|
||||
}
|
||||
}
|
||||
private:
|
||||
Result Signal(uintptr_t addr, s32 count);
|
||||
Result SignalAndIncrementIfEqual(uintptr_t addr, s32 value, s32 count);
|
||||
Result SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count);
|
||||
Result WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout);
|
||||
Result WaitIfEqual(uintptr_t addr, s32 value, s64 timeout);
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
|
||||
|
||||
struct KAddressSpaceInfo {
|
||||
public:
|
||||
enum Type {
|
||||
Type_32Bit = 0,
|
||||
Type_Small64Bit = 1,
|
||||
Type_Large64Bit = 2,
|
||||
Type_Heap = 3,
|
||||
Type_Stack = 4,
|
||||
Type_Alias = 5,
|
||||
|
||||
Type_Count,
|
||||
};
|
||||
private:
|
||||
size_t bit_width;
|
||||
size_t address;
|
||||
size_t size;
|
||||
Type type;
|
||||
public:
|
||||
static uintptr_t GetAddressSpaceStart(size_t width, Type type);
|
||||
static size_t GetAddressSpaceSize(size_t width, Type type);
|
||||
|
||||
constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : bit_width(bw), address(a), size(s), type(t) { /* ... */ }
|
||||
|
||||
constexpr size_t GetWidth() const { return this->bit_width; }
|
||||
constexpr size_t GetAddress() const { return this->address; }
|
||||
constexpr size_t GetSize() const { return this->size; }
|
||||
constexpr Type GetType() const { return this->type; }
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KAffinityMask {
|
||||
private:
|
||||
static constexpr u64 AllowedAffinityMask = (1ul << cpu::NumCores) - 1;
|
||||
private:
|
||||
u64 mask;
|
||||
private:
|
||||
static constexpr ALWAYS_INLINE u64 GetCoreBit(s32 core) {
|
||||
MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
|
||||
return (1ul << core);
|
||||
}
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KAffinityMask() : mask(0) { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return this->mask; }
|
||||
|
||||
constexpr ALWAYS_INLINE void SetAffinityMask(u64 new_mask) {
|
||||
MESOSPHERE_ASSERT((new_mask & ~AllowedAffinityMask) == 0);
|
||||
this->mask = new_mask;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool GetAffinity(s32 core) const {
|
||||
return this->mask & GetCoreBit(core);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetAffinity(s32 core, bool set) {
|
||||
MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
|
||||
|
||||
if (set) {
|
||||
this->mask |= GetCoreBit(core);
|
||||
} else {
|
||||
this->mask &= ~GetCoreBit(core);
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetAll() {
|
||||
this->mask = AllowedAffinityMask;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
#include <mesosphere/kern_k_class_token.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KProcess;
|
||||
|
||||
#define MESOSPHERE_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
|
||||
NON_COPYABLE(CLASS); \
|
||||
NON_MOVEABLE(CLASS); \
|
||||
private: \
|
||||
friend class ::ams::kern::KClassTokenGenerator; \
|
||||
static constexpr inline auto ObjectType = ::ams::kern::KClassTokenGenerator::ObjectType::CLASS; \
|
||||
static constexpr inline const char * const TypeName = #CLASS; \
|
||||
static constexpr inline ClassTokenType ClassToken() { return ::ams::kern::ClassToken<CLASS>; } \
|
||||
public: \
|
||||
using BaseClass = BASE_CLASS; \
|
||||
static constexpr ALWAYS_INLINE TypeObj GetStaticTypeObj() { \
|
||||
constexpr ClassTokenType Token = ClassToken(); \
|
||||
return TypeObj(TypeName, Token); \
|
||||
} \
|
||||
static constexpr ALWAYS_INLINE const char *GetStaticTypeName() { return TypeName; } \
|
||||
virtual TypeObj GetTypeObj() const { return GetStaticTypeObj(); } \
|
||||
virtual const char *GetTypeName() { return GetStaticTypeName(); } \
|
||||
private:
|
||||
|
||||
|
||||
class KAutoObject {
|
||||
protected:
|
||||
class TypeObj {
|
||||
private:
|
||||
const char *name;
|
||||
ClassTokenType class_token;
|
||||
public:
|
||||
constexpr explicit TypeObj(const char *n, ClassTokenType tok) : name(n), class_token(tok) { /* ... */ }
|
||||
|
||||
constexpr ALWAYS_INLINE const char *GetName() const { return this->name; }
|
||||
constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return this->class_token; }
|
||||
|
||||
constexpr ALWAYS_INLINE bool operator==(const TypeObj &rhs) {
|
||||
return this->GetClassToken() == rhs.GetClassToken();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool operator!=(const TypeObj &rhs) {
|
||||
return this->GetClassToken() != rhs.GetClassToken();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) {
|
||||
return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken();
|
||||
}
|
||||
};
|
||||
private:
|
||||
MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
|
||||
private:
|
||||
std::atomic<u32> ref_count;
|
||||
public:
|
||||
static KAutoObject *Create(KAutoObject *ptr);
|
||||
public:
|
||||
constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { MESOSPHERE_ASSERT_THIS(); }
|
||||
virtual ~KAutoObject() { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
/* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. */
|
||||
virtual void Destroy() { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
/* Finalize is responsible for cleaning up resource, but does not destroy the object. */
|
||||
virtual void Finalize() { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
virtual KProcess *GetOwner() const { return nullptr; }
|
||||
|
||||
u32 GetReferenceCount() const {
|
||||
return this->ref_count;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const {
|
||||
return this->GetTypeObj().IsDerivedFrom(rhs);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool IsDerivedFrom(const KAutoObject &rhs) const {
|
||||
return this->IsDerivedFrom(rhs.GetTypeObj());
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
ALWAYS_INLINE Derived DynamicCast() {
|
||||
static_assert(std::is_pointer<Derived>::value);
|
||||
using DerivedType = typename std::remove_pointer<Derived>::type;
|
||||
|
||||
if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) {
|
||||
return static_cast<Derived>(this);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
ALWAYS_INLINE const Derived DynamicCast() const {
|
||||
static_assert(std::is_pointer<Derived>::value);
|
||||
using DerivedType = typename std::remove_pointer<Derived>::type;
|
||||
|
||||
if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) {
|
||||
return static_cast<Derived>(this);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool Open() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Atomically increment the reference count, only if it's positive. */
|
||||
u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire);
|
||||
do {
|
||||
if (AMS_UNLIKELY(cur_ref_count == 0)) {
|
||||
return false;
|
||||
}
|
||||
MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1);
|
||||
} while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void Close() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Atomically decrement the reference count, not allowing it to become negative. */
|
||||
u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire);
|
||||
do {
|
||||
MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0);
|
||||
} while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed));
|
||||
|
||||
/* If ref count hits zero, destroy the object. */
|
||||
if (cur_ref_count - 1 == 0) {
|
||||
this->Destroy();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class KAutoObjectWithListContainer;
|
||||
|
||||
class KAutoObjectWithList : public KAutoObject {
|
||||
private:
|
||||
friend class KAutoObjectWithListContainer;
|
||||
private:
|
||||
util::IntrusiveRedBlackTreeNode list_node;
|
||||
public:
|
||||
static ALWAYS_INLINE int Compare(const KAutoObjectWithList &lhs, const KAutoObjectWithList &rhs) {
|
||||
const u64 lid = lhs.GetId();
|
||||
const u64 rid = rhs.GetId();
|
||||
|
||||
if (lid < rid) {
|
||||
return -1;
|
||||
} else if (lid > rid) {
|
||||
return 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
public:
|
||||
virtual u64 GetId() const {
|
||||
return reinterpret_cast<u64>(this);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
class KScopedAutoObject {
|
||||
static_assert(std::is_base_of<KAutoObject, T>::value);
|
||||
NON_COPYABLE(KScopedAutoObject);
|
||||
private:
|
||||
T *obj;
|
||||
private:
|
||||
constexpr ALWAYS_INLINE void Swap(KScopedAutoObject &rhs) {
|
||||
/* TODO: C++20 constexpr std::swap */
|
||||
T *tmp = rhs.obj;
|
||||
rhs.obj = this->obj;
|
||||
this->obj = tmp;
|
||||
}
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KScopedAutoObject() : obj(nullptr) { /* ... */ }
|
||||
constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) {
|
||||
if (this->obj != nullptr) {
|
||||
this->obj->Open();
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE ~KScopedAutoObject() {
|
||||
if (this->obj != nullptr) {
|
||||
this->obj->Close();
|
||||
}
|
||||
this->obj = nullptr;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KScopedAutoObject(KScopedAutoObject &&rhs) {
|
||||
this->obj = rhs.obj;
|
||||
rhs.obj = nullptr;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KScopedAutoObject &operator=(KScopedAutoObject &&rhs) {
|
||||
rhs.Swap(*this);
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE T *operator->() { return this->obj; }
|
||||
constexpr ALWAYS_INLINE T &operator*() { return *this->obj; }
|
||||
|
||||
constexpr ALWAYS_INLINE void Reset(T *o) {
|
||||
KScopedAutoObject(o).Swap(*this);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsNull() const { return this->obj == nullptr; }
|
||||
constexpr ALWAYS_INLINE bool IsNotNull() const { return this->obj != nullptr; }
|
||||
};
|
||||
|
||||
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_k_light_lock.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KAutoObjectWithListContainer {
|
||||
NON_COPYABLE(KAutoObjectWithListContainer);
|
||||
NON_MOVEABLE(KAutoObjectWithListContainer);
|
||||
private:
|
||||
using ListType = util::IntrusiveRedBlackTreeMemberTraits<&KAutoObjectWithList::list_node>::TreeType<KAutoObjectWithList>;
|
||||
public:
|
||||
class ListAccessor : public KScopedLightLock {
|
||||
private:
|
||||
ListType &list;
|
||||
public:
|
||||
explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->lock), list(container->object_list) { /* ... */ }
|
||||
explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.lock), list(container.object_list) { /* ... */ }
|
||||
|
||||
typename ListType::iterator begin() const {
|
||||
return this->list.begin();
|
||||
}
|
||||
|
||||
typename ListType::iterator end() const {
|
||||
return this->list.end();
|
||||
}
|
||||
|
||||
typename ListType::iterator find(typename ListType::const_reference ref) const {
|
||||
return this->list.find(ref);
|
||||
}
|
||||
};
|
||||
|
||||
friend class ListAccessor;
|
||||
private:
|
||||
KLightLock lock;
|
||||
ListType object_list;
|
||||
public:
|
||||
constexpr KAutoObjectWithListContainer() : lock(), object_list() { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
void Initialize() { MESOSPHERE_ASSERT_THIS(); }
|
||||
void Finalize() { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
Result Register(KAutoObjectWithList *obj);
|
||||
Result Unregister(KAutoObjectWithList *obj);
|
||||
size_t GetOwnedCount(KProcess *owner);
|
||||
};
|
||||
|
||||
|
||||
}
|
|
@ -0,0 +1,280 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
#include <mesosphere/kern_select_page_table.hpp>
|
||||
#include <mesosphere/kern_svc.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KCapabilities {
|
||||
private:
|
||||
static constexpr size_t SvcFlagCount = svc::NumSupervisorCalls / BITSIZEOF(u8);
|
||||
static constexpr size_t IrqFlagCount = /* TODO */0x80;
|
||||
|
||||
enum class CapabilityType : u32 {
|
||||
CorePriority = (1u << 3) - 1,
|
||||
SyscallMask = (1u << 4) - 1,
|
||||
MapRange = (1u << 6) - 1,
|
||||
MapIoPage = (1u << 7) - 1,
|
||||
MapRegion = (1u << 10) - 1,
|
||||
InterruptPair = (1u << 11) - 1,
|
||||
ProgramType = (1u << 13) - 1,
|
||||
KernelVersion = (1u << 14) - 1,
|
||||
HandleTable = (1u << 15) - 1,
|
||||
DebugFlags = (1u << 16) - 1,
|
||||
|
||||
Invalid = 0u,
|
||||
Padding = ~0u,
|
||||
};
|
||||
|
||||
using RawCapabilityValue = util::BitPack32::Field<0, BITSIZEOF(util::BitPack32), u32>;
|
||||
|
||||
static constexpr CapabilityType GetCapabilityType(const util::BitPack32 cap) {
|
||||
const u32 value = cap.Get<RawCapabilityValue>();
|
||||
return static_cast<CapabilityType>((~value & (value + 1)) - 1);
|
||||
}
|
||||
|
||||
static constexpr u32 GetCapabilityFlag(CapabilityType type) {
|
||||
return static_cast<u32>(type) + 1;
|
||||
}
|
||||
|
||||
static constexpr u32 CountTrailingZero(u32 flag) {
|
||||
for (u32 i = 0; i < BITSIZEOF(u32); i++) {
|
||||
if (flag & (1u << i)) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return BITSIZEOF(u32);
|
||||
}
|
||||
|
||||
static constexpr u32 GetCapabilityId(CapabilityType type) {
|
||||
const u32 flag = GetCapabilityFlag(type);
|
||||
if (true /* C++20: std::is_constant_evaluated() */) {
|
||||
return CountTrailingZero(flag);
|
||||
} else {
|
||||
return static_cast<u32>(__builtin_ctz(flag));
|
||||
}
|
||||
}
|
||||
|
||||
template<size_t Index, size_t Count, typename T = u32>
|
||||
using Field = util::BitPack32::Field<Index, Count, T>;
|
||||
|
||||
#define DEFINE_FIELD(name, prev, ...) using name = Field<prev::Next, __VA_ARGS__>
|
||||
|
||||
template<CapabilityType Type>
|
||||
static constexpr inline u32 CapabilityFlag = []() -> u32 {
|
||||
return static_cast<u32>(Type) + 1;
|
||||
}();
|
||||
|
||||
template<CapabilityType Type>
|
||||
static constexpr inline u32 CapabilityId = []() -> u32 {
|
||||
const u32 flag = static_cast<u32>(Type) + 1;
|
||||
if (true /* C++20: std::is_constant_evaluated() */) {
|
||||
for (u32 i = 0; i < BITSIZEOF(u32); i++) {
|
||||
if (flag & (1u << i)) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return BITSIZEOF(u32);
|
||||
} else {
|
||||
return __builtin_ctz(flag);
|
||||
}
|
||||
}();
|
||||
|
||||
struct CorePriority {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::CorePriority> + 1>;
|
||||
|
||||
DEFINE_FIELD(LowestThreadPriority, IdBits, 6);
|
||||
DEFINE_FIELD(HighestThreadPriority, LowestThreadPriority, 6);
|
||||
DEFINE_FIELD(MinimumCoreId, HighestThreadPriority, 8);
|
||||
DEFINE_FIELD(MaximumCoreId, MinimumCoreId, 8);
|
||||
};
|
||||
|
||||
struct SyscallMask {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::SyscallMask> + 1>;
|
||||
|
||||
DEFINE_FIELD(Mask, IdBits, 24);
|
||||
DEFINE_FIELD(Index, Mask, 3);
|
||||
};
|
||||
|
||||
static constexpr u64 PhysicalMapAllowedMask = (1ul << 36) - 1;
|
||||
|
||||
struct MapRange {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::MapRange> + 1>;
|
||||
|
||||
DEFINE_FIELD(Address, IdBits, 24);
|
||||
DEFINE_FIELD(ReadOnly, Address, 1, bool);
|
||||
};
|
||||
|
||||
struct MapRangeSize {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::MapRange> + 1>;
|
||||
|
||||
DEFINE_FIELD(Pages, IdBits, 20);
|
||||
DEFINE_FIELD(Reserved, Pages, 4);
|
||||
DEFINE_FIELD(Normal, Reserved, 1, bool);
|
||||
};
|
||||
|
||||
struct MapIoPage {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::MapIoPage> + 1>;
|
||||
|
||||
DEFINE_FIELD(Address, IdBits, 24);
|
||||
};
|
||||
|
||||
enum class RegionType : u32 {
|
||||
None = 0,
|
||||
KernelTraceBuffer = 1,
|
||||
OnMemoryBootImage = 2,
|
||||
DTB = 3,
|
||||
};
|
||||
|
||||
struct MapRegion {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::MapRegion> + 1>;
|
||||
|
||||
DEFINE_FIELD(Region0, IdBits, 6, RegionType);
|
||||
DEFINE_FIELD(ReadOnly0, Region0, 1, bool);
|
||||
DEFINE_FIELD(Region1, ReadOnly0, 6, RegionType);
|
||||
DEFINE_FIELD(ReadOnly1, Region1, 1, bool);
|
||||
DEFINE_FIELD(Region2, ReadOnly1, 6, RegionType);
|
||||
DEFINE_FIELD(ReadOnly2, Region2, 1, bool);
|
||||
};
|
||||
|
||||
static const u32 PaddingInterruptId = 0x3FF;
|
||||
|
||||
struct InterruptPair {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::InterruptPair> + 1>;
|
||||
|
||||
DEFINE_FIELD(InterruptId0, IdBits, 10);
|
||||
DEFINE_FIELD(InterruptId1, InterruptId0, 10);
|
||||
};
|
||||
|
||||
|
||||
struct ProgramType {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::ProgramType> + 1>;
|
||||
|
||||
DEFINE_FIELD(Type, IdBits, 3);
|
||||
DEFINE_FIELD(Reserved, Type, 15);
|
||||
};
|
||||
|
||||
struct KernelVersion {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::KernelVersion> + 1>;
|
||||
|
||||
DEFINE_FIELD(MinorVersion, IdBits, 4);
|
||||
DEFINE_FIELD(MajorVersion, MinorVersion, 13);
|
||||
};
|
||||
|
||||
struct HandleTable {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::HandleTable> + 1>;
|
||||
|
||||
DEFINE_FIELD(Size, IdBits, 10);
|
||||
DEFINE_FIELD(Reserved, Size, 6);
|
||||
};
|
||||
|
||||
struct DebugFlags {
|
||||
using IdBits = Field<0, CapabilityId<CapabilityType::DebugFlags> + 1>;
|
||||
|
||||
DEFINE_FIELD(AllowDebug, IdBits, 1, bool);
|
||||
DEFINE_FIELD(ForceDebug, AllowDebug, 1, bool);
|
||||
DEFINE_FIELD(Reserved, ForceDebug, 13);
|
||||
};
|
||||
|
||||
#undef DEFINE_FIELD
|
||||
|
||||
static constexpr u32 InitializeOnceFlags = CapabilityFlag<CapabilityType::CorePriority> |
|
||||
CapabilityFlag<CapabilityType::ProgramType> |
|
||||
CapabilityFlag<CapabilityType::KernelVersion> |
|
||||
CapabilityFlag<CapabilityType::HandleTable> |
|
||||
CapabilityFlag<CapabilityType::DebugFlags>;
|
||||
private:
|
||||
u8 svc_access_flags[SvcFlagCount]{};
|
||||
u8 irq_access_flags[IrqFlagCount]{};
|
||||
u64 core_mask{};
|
||||
u64 priority_mask{};
|
||||
util::BitPack32 debug_capabilities;
|
||||
s32 handle_table_size{};
|
||||
util::BitPack32 intended_kernel_version;
|
||||
u32 program_type{};
|
||||
private:
|
||||
/* Sets the bit for svc id in the permission bitmap at data. */
static constexpr ALWAYS_INLINE void SetSvcAllowedImpl(u8 *data, u32 id) {
    constexpr size_t BitsPerWord = BITSIZEOF(*data);
    MESOSPHERE_ASSERT(id < svc::SvcId_Count);
    const size_t word = id / BitsPerWord;
    const size_t bit  = id % BitsPerWord;
    data[word] |= (1ul << bit);
}
|
||||
|
||||
/* Clears the bit for svc id in the permission bitmap at data. */
static constexpr ALWAYS_INLINE void ClearSvcAllowedImpl(u8 *data, u32 id) {
    constexpr size_t BitsPerWord = BITSIZEOF(*data);
    MESOSPHERE_ASSERT(id < svc::SvcId_Count);
    const size_t word = id / BitsPerWord;
    const size_t bit  = id % BitsPerWord;
    data[word] &= ~(1ul << bit);
}
|
||||
|
||||
/* Grants the process permission to use svc id. */
/* Returns false (without modifying state) when id is out of range. */
bool SetSvcAllowed(u32 id) {
    if (id >= BITSIZEOF(this->svc_access_flags)) {
        return false;
    }
    SetSvcAllowedImpl(this->svc_access_flags, id);
    return true;
}
|
||||
|
||||
/* Grants the process permission to use interrupt id. */
/* Returns false (without modifying state) when id is out of range. */
bool SetInterruptAllowed(u32 id) {
    constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
    if (id < BITSIZEOF(this->irq_access_flags)) {
        /* OR the bit in; plain assignment (=) would clobber the flags of */
        /* other interrupts already recorded in the same byte. This matches */
        /* the |= used by SetSvcAllowedImpl for the svc bitmap. */
        this->irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
        return true;
    } else {
        return false;
    }
}
|
||||
|
||||
Result SetCorePriorityCapability(const util::BitPack32 cap);
|
||||
Result SetSyscallMaskCapability(const util::BitPack32 cap, u32 &set_svc);
|
||||
Result MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table);
|
||||
Result MapIoPage(const util::BitPack32 cap, KProcessPageTable *page_table);
|
||||
Result MapRegion(const util::BitPack32 cap, KProcessPageTable *page_table);
|
||||
Result SetInterruptPairCapability(const util::BitPack32 cap);
|
||||
Result SetProgramTypeCapability(const util::BitPack32 cap);
|
||||
Result SetKernelVersionCapability(const util::BitPack32 cap);
|
||||
Result SetHandleTableCapability(const util::BitPack32 cap);
|
||||
Result SetDebugFlagsCapability(const util::BitPack32 cap);
|
||||
|
||||
Result SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table);
|
||||
Result SetCapabilities(const u32 *caps, s32 num_caps, KProcessPageTable *page_table);
|
||||
public:
|
||||
constexpr KCapabilities() : debug_capabilities(0), intended_kernel_version(0) { /* ... */ }
|
||||
|
||||
Result Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table);
|
||||
|
||||
constexpr u64 GetCoreMask() const { return this->core_mask; }
|
||||
constexpr u64 GetPriorityMask() const { return this->priority_mask; }
|
||||
constexpr s32 GetHandleTableSize() const { return this->handle_table_size; }
|
||||
|
||||
/* Copies this process's svc permission bitmap into a thread's stack parameters, */
/* then unconditionally clears ReturnFromException and SynchronizePreemptionState, */
/* and additionally clears GetInfo while the thread's preemption state is pinned. */
ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const {
    static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
    std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));

    /* Clear specific SVCs based on our state. */
    ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
    ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
    if (sp.is_preemption_state_pinned) {
        ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
    }
}
|
||||
|
||||
/* TODO: Member functions. */
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,127 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KAutoObject;
|
||||
|
||||
/* Generates a unique 16-bit class token for every KAutoObject-derived type. */
/* Base classes each own one bit of the low byte; final classes are encoded as */
/* 3-of-8 bit combinations in the high byte. A type's token ORs in the tokens */
/* of all its bases, so derived-from checks reduce to a bitmask test. */
class KClassTokenGenerator {
    public:
        using TokenBaseType = u16;
    public:
        static constexpr size_t BaseClassBits = 8;
        static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits;
        /* One bit per base class. */
        static constexpr size_t NumBaseClasses = BaseClassBits;
        /* Final classes are permutations of three bits. */
        static constexpr size_t NumFinalClasses = [] {
            /* Count C(FinalClassBits, 3) by enumerating all i < j < k triples. */
            TokenBaseType index = 0;
            for (size_t i = 0; i < FinalClassBits; i++) {
                for (size_t j = i + 1; j < FinalClassBits; j++) {
                    for (size_t k = j + 1; k < FinalClassBits; k++) {
                        index++;
                    }
                }
            }
            return index;
        }();
    private:
        /* Token for the Index-th base class: a single bit in the low byte. */
        template<TokenBaseType Index>
        static constexpr inline TokenBaseType BaseClassToken = BIT(Index);

        /* Token for the Index-th final class: the Index-th i<j<k bit triple, */
        /* shifted into the high byte. Enumeration order must match the one */
        /* used in NumFinalClasses above. */
        template<TokenBaseType Index>
        static constexpr inline TokenBaseType FinalClassToken = [] {
            TokenBaseType index = 0;
            for (size_t i = 0; i < FinalClassBits; i++) {
                for (size_t j = i + 1; j < FinalClassBits; j++) {
                    for (size_t k = j + 1; k < FinalClassBits; k++) {
                        if ((index++) == Index) {
                            return ((1ul << i) | (1ul << j) | (1ul << k)) << BaseClassBits;
                        }
                    }
                }
            }
            __builtin_unreachable();
        }();

        /* Computes T's token: its own class bit(s) ORed with those of every */
        /* ancestor, recursing through T::BaseClass until KAutoObject (token 0). */
        template<typename T>
        static constexpr inline TokenBaseType GetClassToken() {
            static_assert(std::is_base_of<KAutoObject, T>::value);
            if constexpr (std::is_same<T, KAutoObject>::value) {
                static_assert(T::ObjectType == ObjectType::KAutoObject);
                return 0;
            } else if constexpr (!std::is_final<T>::value) {
                static_assert(ObjectType::BaseClassesStart <= T::ObjectType && T::ObjectType < ObjectType::BaseClassesEnd);
                constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - static_cast<TokenBaseType>(ObjectType::BaseClassesStart);
                return BaseClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
            } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType && T::ObjectType < ObjectType::FinalClassesEnd) {
                constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - static_cast<TokenBaseType>(ObjectType::FinalClassesStart);
                return FinalClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
            } else {
                static_assert(!std::is_same<T, T>::value, "GetClassToken: Invalid Type");
            }
        };
    public:
        /* NOTE: enumerator order determines token values — do not reorder. */
        enum class ObjectType {
            KAutoObject,

            BaseClassesStart,

            KSynchronizationObject = BaseClassesStart,
            KReadableEvent,

            BaseClassesEnd,

            FinalClassesStart = BaseClassesEnd,

            KInterruptEvent = FinalClassesStart,
            KDebug,
            KThread,
            KServerPort,
            KServerSession,
            KClientPort,
            KClientSession,
            KProcess,
            KResourceLimit,
            KLightSession,
            KPort,
            KSession,
            KSharedMemory,
            KEvent,
            KWritableEvent,
            KLightClientSession,
            KLightServerSession,
            KTransferMemory,
            KDeviceAddressSpace,
            KSessionRequest,
            KCodeMemory,

            FinalClassesEnd = FinalClassesStart + NumFinalClasses,
        };

        /* The token for type T, computed at compile time. */
        template<typename T>
        static constexpr inline TokenBaseType ClassToken = GetClassToken<T>();
};
|
||||
|
||||
using ClassTokenType = KClassTokenGenerator::TokenBaseType;
|
||||
|
||||
template<typename T>
|
||||
static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken<T>;
|
||||
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Kernel object representing code memory; slab-allocated and container-tracked. */
class KCodeMemory final : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
#include <mesosphere/kern_k_scheduler.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
struct KConditionVariableComparator {
|
||||
static constexpr ALWAYS_INLINE int Compare(const KThread &lhs, const KThread &rhs) {
|
||||
const uintptr_t l_key = lhs.GetConditionVariableKey();
|
||||
const uintptr_t r_key = rhs.GetConditionVariableKey();
|
||||
|
||||
if (l_key < r_key) {
|
||||
/* Sort first by key */
|
||||
return -1;
|
||||
} else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
|
||||
/* And then by priority. */
|
||||
return -1;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/* Kernel condition variable: tracks waiting threads in an intrusive */
/* red-black tree ordered by (condvar key, priority). */
class KConditionVariable {
    public:
        using ThreadTree = util::IntrusiveRedBlackTreeMemberTraits<&KThread::condvar_arbiter_tree_node>::TreeType<KConditionVariableComparator>;
    private:
        ThreadTree tree;
    public:
        constexpr KConditionVariable() : tree() { /* ... */ }

        /* Arbitration. */
        Result SignalToAddress(KProcessAddress addr);
        Result WaitForAddress(ams::svc::Handle handle, KProcessAddress addr, u32 value);

        /* Condition variable. */
        void Signal(uintptr_t cv_key, s32 count);
        Result Wait(KProcessAddress addr, uintptr_t key, u32 value, s64 timeout);

        /* Removes thread from the tree ahead of a priority change, since priority */
        /* is part of the tree's ordering key. Requires the scheduler lock. */
        ALWAYS_INLINE void BeforeUpdatePriority(KThread *thread) {
            MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

            this->tree.erase(this->tree.iterator_to(*thread));
        }

        /* Re-inserts thread after its priority change so the ordering stays valid. */
        /* Requires the scheduler lock. */
        ALWAYS_INLINE void AfterUpdatePriority(KThread *thread) {
            MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

            this->tree.insert(*thread);
        }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_current_context.hpp>
|
||||
#include <mesosphere/kern_k_scheduler.hpp>
|
||||
#include <mesosphere/kern_k_interrupt_task_manager.hpp>
|
||||
#include <mesosphere/kern_select_interrupt_manager.hpp>
|
||||
#include <mesosphere/kern_select_hardware_timer.hpp>
|
||||
#include <mesosphere/kern_k_memory_manager.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Per-core kernel state; one instance lives in each core's core-local page. */
struct KCoreLocalContext {
    KCurrentContext current;
    KScheduler scheduler;
    KInterruptTaskManager interrupt_task_manager;
    KInterruptManager interrupt_manager;
    KHardwareTimer hardware_timer;
    /* Everything after this point is for debugging. */
    /* Retail kernel doesn't even consistently update these fields. */
    u64 num_sw_interrupts;
    u64 num_hw_interrupts;
    std::atomic<u64> num_svc;
    u64 num_process_switches;
    u64 num_thread_switches;
    u64 num_fpu_switches;
    u64 num_scheduler_updates;
    u64 num_invoked_scheduler_updates;
    std::atomic<u64> num_specific_svc[0x80];
    u32 perf_counters[6];
};
static_assert(sizeof(KCoreLocalContext) < PageSize);

/* A core-local context padded out to exactly one page. */
struct KCoreLocalPage {
    KCoreLocalContext context;
    u8 padding[PageSize - sizeof(KCoreLocalContext)];
};
static_assert(sizeof(KCoreLocalPage) == PageSize);

/* Layout of the core-local region: the calling core's page first, */
/* followed by every core's page at a fixed (absolute) position. */
struct KCoreLocalRegion {
    KCoreLocalPage current;
    KCoreLocalPage absolute[cpu::NumCores];
};
static_assert(sizeof(KCoreLocalRegion) == PageSize * (1 + cpu::NumCores));
|
||||
|
||||
}
|
|
@ -0,0 +1,89 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KThread;
|
||||
class KProcess;
|
||||
class KScheduler;
|
||||
class KInterruptTaskManager;
|
||||
|
||||
/* Per-core record of the currently running thread/process and the core's */
/* scheduler and interrupt-task-manager pointers. Must fit in one cache line */
/* (see static_assert below). */
struct KCurrentContext {
    std::atomic<KThread *> current_thread;
    std::atomic<KProcess *> current_process;
    KScheduler *scheduler;
    KInterruptTaskManager *interrupt_task_manager;
    s32 core_id;
    void *exception_stack_top;
};
static_assert(std::is_pod<KCurrentContext>::value);
static_assert(sizeof(KCurrentContext) <= cpu::DataCacheLineSize);

namespace impl {

    /* The current core's context is located at the start of its core-local region. */
    ALWAYS_INLINE KCurrentContext &GetCurrentContext() {
        return *reinterpret_cast<KCurrentContext *>(cpu::GetCoreLocalRegionAddress());
    }

}

/* Accessors for the calling core's context. Loads use relaxed ordering; */
/* the values are per-core and written by the core itself. */
ALWAYS_INLINE KThread *GetCurrentThreadPointer() {
    return impl::GetCurrentContext().current_thread.load(std::memory_order_relaxed);
}

ALWAYS_INLINE KThread &GetCurrentThread() {
    return *GetCurrentThreadPointer();
}

ALWAYS_INLINE KProcess *GetCurrentProcessPointer() {
    return impl::GetCurrentContext().current_process.load(std::memory_order_relaxed);
}

ALWAYS_INLINE KProcess &GetCurrentProcess() {
    return *GetCurrentProcessPointer();
}

ALWAYS_INLINE KScheduler *GetCurrentSchedulerPointer() {
    return impl::GetCurrentContext().scheduler;
}

ALWAYS_INLINE KScheduler &GetCurrentScheduler() {
    return *GetCurrentSchedulerPointer();
}

ALWAYS_INLINE KInterruptTaskManager *GetCurrentInterruptTaskManagerPointer() {
    return impl::GetCurrentContext().interrupt_task_manager;
}

ALWAYS_INLINE KInterruptTaskManager &GetCurrentInterruptTaskManager() {
    return *GetCurrentInterruptTaskManagerPointer();
}

ALWAYS_INLINE s32 GetCurrentCoreId() {
    return impl::GetCurrentContext().core_id;
}

/* Setters, used on context switch. */
ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) {
    impl::GetCurrentContext().current_thread = new_thread;
}

ALWAYS_INLINE void SetCurrentProcess(KProcess *new_process) {
    impl::GetCurrentContext().current_process = new_process;
}
|
||||
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Base class for debug objects; waitable via KSynchronizationObject. */
class KDebugBase : public KSynchronizationObject {
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_select_device_page_table.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Kernel object representing a device (IOMMU) address space; */
/* slab-allocated and container-tracked. */
class KDeviceAddressSpace final : public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject);
    public:
        static void Initialize();

        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KDpcManager {
|
||||
private:
|
||||
static constexpr s32 DpcManagerNormalThreadPriority = 59;
|
||||
static constexpr s32 DpcManagerPreemptionThreadPriority = 63;
|
||||
|
||||
static_assert(ams::svc::HighestThreadPriority <= DpcManagerNormalThreadPriority && DpcManagerNormalThreadPriority <= ams::svc::LowestThreadPriority);
|
||||
static_assert(ams::svc::HighestThreadPriority <= DpcManagerPreemptionThreadPriority && DpcManagerPreemptionThreadPriority <= ams::svc::LowestThreadPriority);
|
||||
private:
|
||||
static NOINLINE void Initialize(s32 core_id, s32 priority);
|
||||
public:
|
||||
static void Initialize() {
|
||||
const s32 core_id = GetCurrentCoreId();
|
||||
if (core_id == static_cast<s32>(cpu::NumCores) - 1) {
|
||||
Initialize(core_id, DpcManagerPreemptionThreadPriority);
|
||||
} else {
|
||||
Initialize(core_id, DpcManagerNormalThreadPriority);
|
||||
}
|
||||
}
|
||||
|
||||
static NOINLINE void HandleDpc();
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_slab_heap.hpp>
|
||||
#include <mesosphere/kern_k_page_group.hpp>
|
||||
#include <mesosphere/kern_k_memory_block.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
namespace impl {
|
||||
|
||||
class DynamicSlabHeapPage {
|
||||
private:
|
||||
u8 buffer[PageSize];
|
||||
};
|
||||
static_assert(sizeof(DynamicSlabHeapPage) == PageSize);
|
||||
|
||||
};
|
||||
|
||||
/* Slab heap of T objects that can grow at runtime by pulling page-sized */
/* buffers from a backing KDynamicSlabHeap<DynamicSlabHeapPage> allocator. */
template<typename T>
class KDynamicSlabHeap {
    NON_COPYABLE(KDynamicSlabHeap);
    NON_MOVEABLE(KDynamicSlabHeap);
    private:
        using Impl = impl::KSlabHeapImpl;
        using PageBuffer = impl::DynamicSlabHeapPage;
    private:
        Impl impl;                                    /* Underlying free-list implementation. */
        KDynamicSlabHeap<PageBuffer> *next_allocator; /* Source of new pages when empty (may be null). */
        std::atomic<size_t> used;                     /* Number of objects currently allocated. */
        std::atomic<size_t> peak;                     /* High-water mark of used. */
        std::atomic<size_t> count;                    /* Total object capacity, grows with new pages. */
        KVirtualAddress address;                      /* Start of the backing virtual region. */
        size_t size;                                  /* Size of the backing region, in bytes. */
    private:
        ALWAYS_INLINE Impl *GetImpl() {
            return std::addressof(this->impl);
        }
        ALWAYS_INLINE const Impl *GetImpl() const {
            return std::addressof(this->impl);
        }
    public:
        constexpr KDynamicSlabHeap() : impl(), next_allocator(), used(), peak(), count(), address(), size() { /* ... */ }

        constexpr KVirtualAddress GetAddress() const { return this->address; }
        constexpr size_t GetSize() const { return this->size; }
        constexpr size_t GetUsed() const { return this->used; }
        constexpr size_t GetPeak() const { return this->peak; }
        constexpr size_t GetCount() const { return this->count; }

        /* Whether addr lies within this heap's backing region (inclusive of last byte). */
        constexpr bool IsInRange(KVirtualAddress addr) const {
            return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
        }

        /* Initializes over [memory, memory + sz), pushing every T-sized slot */
        /* onto the free list (back to front, so the list ends up front-first). */
        void Initialize(KVirtualAddress memory, size_t sz) {
            /* Set tracking fields. */
            this->address = memory;
            this->count = sz / sizeof(T);
            this->size = this->count * sizeof(T);

            /* Free blocks to memory. */
            u8 *cur = GetPointer<u8>(this->address + this->size);
            for (size_t i = 0; i < this->count; i++) {
                cur -= sizeof(T);
                this->GetImpl()->Free(cur);
            }
        }

        /* Initializes as an initially-empty heap that grows on demand from next. */
        void Initialize(KDynamicSlabHeap<PageBuffer> *next) {
            this->next_allocator = next;
            this->address = next->GetAddress();
            this->size = next->GetSize();
        }

        /* Allocates and default-constructs one T; pulls a fresh page from the */
        /* next allocator when empty. Returns nullptr on exhaustion. */
        T *Allocate() {
            T *allocated = reinterpret_cast<T *>(this->GetImpl()->Allocate());

            /* If we fail to allocate, try to get a new page from our next allocator. */
            if (AMS_UNLIKELY(allocated == nullptr)) {
                if (this->next_allocator != nullptr) {
                    allocated = reinterpret_cast<T *>(this->next_allocator->Allocate());
                    if (allocated != nullptr) {
                        /* If we succeeded in getting a page, free the rest to our slab. */
                        /* (the first slot is handed straight to the caller). */
                        for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
                            this->GetImpl()->Free(allocated + i);
                        }
                        this->count += sizeof(PageBuffer) / sizeof(T);
                    }
                }
            }

            if (AMS_LIKELY(allocated != nullptr)) {
                /* Construct the object. */
                new (allocated) T();

                /* Update our tracking. */
                size_t used = ++this->used;
                size_t peak = this->peak;
                /* Raise the peak via CAS; concurrent allocators may race us. */
                while (peak < used) {
                    if (this->peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
                        break;
                    }
                }
            }

            return allocated;
        }

        /* Returns t's storage to the free list. */
        /* NOTE(review): t's destructor is not invoked here — confirm callers rely on that. */
        void Free(T *t) {
            this->GetImpl()->Free(t);
            --this->used;
        }
};
|
||||
|
||||
/* Concrete dynamic slab heaps used by the kernel: page buffers, block infos, */
/* and memory blocks. */
class KDynamicPageManager : public KDynamicSlabHeap<impl::DynamicSlabHeapPage>{};
class KBlockInfoManager : public KDynamicSlabHeap<KBlockInfo>{};
class KMemoryBlockSlabManager : public KDynamicSlabHeap<KMemoryBlock>{};
|
||||
|
||||
}
|
30
libraries/libmesosphere/include/mesosphere/kern_k_event.hpp
Normal file
30
libraries/libmesosphere/include/mesosphere/kern_k_event.hpp
Normal file
|
@ -0,0 +1,30 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_readable_event.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Kernel event object; slab-allocated and container-tracked. */
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Slab-allocated event-info record, linkable into an intrusive list. */
class KEventInfo : public KSlabAllocated<KEventInfo>, public util::IntrusiveListBaseNode<KEventInfo> {
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
/* Selects the architecture-specific KExceptionContext implementation and */
/* aliases it into ams::kern. Fails the build on unknown architectures. */
#ifdef ATMOSPHERE_ARCH_ARM64
    #include <mesosphere/arch/arm64/kern_k_exception_context.hpp>

    namespace ams::kern {
        using ams::kern::arch::arm64::KExceptionContext;
    }
#else
    #error "Unknown architecture for KExceptionContext"
#endif
|
|
@ -0,0 +1,315 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_k_spin_lock.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
#include <mesosphere/kern_k_interrupt_event.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Wraps a handle's raw 32-bit value in a BitPack32 for bit-field extraction. */
constexpr ALWAYS_INLINE util::BitPack32 GetHandleBitPack(ams::svc::Handle handle) {
    return util::BitPack32{handle};
}
|
||||
|
||||
class KProcess;
|
||||
class KThread;
|
||||
|
||||
class KHandleTable {
|
||||
NON_COPYABLE(KHandleTable);
|
||||
NON_MOVEABLE(KHandleTable);
|
||||
public:
|
||||
static constexpr size_t MaxTableSize = 1024;
|
||||
private:
|
||||
using HandleRawValue = util::BitPack32::Field<0, BITSIZEOF(u32), u32>;
|
||||
using HandleEncoded = util::BitPack32::Field<0, BITSIZEOF(ams::svc::Handle), ams::svc::Handle>;
|
||||
|
||||
using HandleIndex = util::BitPack32::Field<0, 15, u16>;
|
||||
using HandleLinearId = util::BitPack32::Field<HandleIndex::Next, 15, u16>;
|
||||
using HandleReserved = util::BitPack32::Field<HandleLinearId::Next, 2, u32>;
|
||||
|
||||
static constexpr u16 MinLinearId = 1;
|
||||
static constexpr u16 MaxLinearId = util::BitPack32{std::numeric_limits<u32>::max()}.Get<HandleLinearId>();
|
||||
|
||||
static constexpr ALWAYS_INLINE ams::svc::Handle EncodeHandle(u16 index, u16 linear_id) {
|
||||
util::BitPack32 pack = {0};
|
||||
pack.Set<HandleIndex>(index);
|
||||
pack.Set<HandleLinearId>(linear_id);
|
||||
pack.Set<HandleReserved>(0);
|
||||
return pack.Get<HandleEncoded>();
|
||||
}
|
||||
|
||||
class Entry {
|
||||
private:
|
||||
union {
|
||||
struct {
|
||||
u16 linear_id;
|
||||
u16 type;
|
||||
} info;
|
||||
Entry *next_free_entry;
|
||||
} meta;
|
||||
KAutoObject *object;
|
||||
public:
|
||||
constexpr Entry() : meta(), object(nullptr) { /* ... */ }
|
||||
|
||||
constexpr ALWAYS_INLINE void SetFree(Entry *next) {
|
||||
this->object = nullptr;
|
||||
this->meta.next_free_entry = next;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetUsed(KAutoObject *obj, u16 linear_id, u16 type) {
|
||||
this->object = obj;
|
||||
this->meta.info = { linear_id, type };
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return this->object; }
|
||||
constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return this->meta.next_free_entry; }
|
||||
constexpr ALWAYS_INLINE u16 GetLinearId() const { return this->meta.info.linear_id; }
|
||||
constexpr ALWAYS_INLINE u16 GetType() const { return this->meta.info.type; }
|
||||
};
|
||||
private:
|
||||
mutable KSpinLock lock;
|
||||
Entry *table;
|
||||
Entry *free_head;
|
||||
Entry entries[MaxTableSize];
|
||||
u16 table_size;
|
||||
u16 max_count;
|
||||
u16 next_linear_id;
|
||||
u16 count;
|
||||
public:
|
||||
constexpr KHandleTable() :
|
||||
lock(), table(nullptr), free_head(nullptr), entries(), table_size(0), max_count(0), next_linear_id(MinLinearId), count(0)
|
||||
{ MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
constexpr NOINLINE Result Initialize(s32 size) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
R_UNLESS(size <= static_cast<s32>(MaxTableSize), svc::ResultOutOfMemory());
|
||||
|
||||
/* Initialize all fields. */
|
||||
this->table = this->entries;
|
||||
this->table_size = (size <= 0) ? MaxTableSize : size;
|
||||
this->next_linear_id = MinLinearId;
|
||||
this->count = 0;
|
||||
this->max_count = 0;
|
||||
|
||||
/* Free all entries. */
|
||||
for (size_t i = 0; i < static_cast<size_t>(this->table_size - 1); i++) {
|
||||
this->entries[i].SetFree(std::addressof(this->entries[i + 1]));
|
||||
}
|
||||
this->entries[this->table_size - 1].SetFree(nullptr);
|
||||
|
||||
this->free_head = std::addressof(this->entries[0]);
|
||||
|
||||
return ResultSuccess();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetTableSize() const { return this->table_size; }
|
||||
constexpr ALWAYS_INLINE size_t GetCount() const { return this->count; }
|
||||
constexpr ALWAYS_INLINE size_t GetMaxCount() const { return this->max_count; }
|
||||
|
||||
NOINLINE Result Finalize();
|
||||
NOINLINE bool Remove(ams::svc::Handle handle);
|
||||
|
||||
template<typename T = KAutoObject>
|
||||
ALWAYS_INLINE KScopedAutoObject<T> GetObject(ams::svc::Handle handle) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Handle pseudo-handles. */
|
||||
if constexpr (std::is_same<T, KProcess>::value) {
|
||||
if (handle == ams::svc::PseudoHandle::CurrentProcess) {
|
||||
return GetCurrentProcessPointer();
|
||||
}
|
||||
} else if constexpr (std::is_same<T, KThread>::value) {
|
||||
if (handle == ams::svc::PseudoHandle::CurrentThread) {
|
||||
return GetCurrentThreadPointer();
|
||||
}
|
||||
}
|
||||
|
||||
/* Lock and look up in table. */
|
||||
KScopedDisableDispatch dd;
|
||||
KScopedSpinLock lk(this->lock);
|
||||
|
||||
if constexpr (std::is_same<T, KAutoObject>::value) {
|
||||
return this->GetObjectImpl(handle);
|
||||
} else {
|
||||
return this->GetObjectImpl(handle)->DynamicCast<T*>();
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T = KAutoObject>
|
||||
ALWAYS_INLINE KScopedAutoObject<T> GetObjectForIpc(ams::svc::Handle handle) const {
|
||||
static_assert(!std::is_base_of<KInterruptEvent, T>::value);
|
||||
|
||||
/* Handle pseudo-handles. */
|
||||
if constexpr (std::is_same<T, KProcess>::value) {
|
||||
if (handle == ams::svc::PseudoHandle::CurrentProcess) {
|
||||
return GetCurrentProcessPointer();
|
||||
}
|
||||
} else if constexpr (std::is_same<T, KThread>::value) {
|
||||
if (handle == ams::svc::PseudoHandle::CurrentThread) {
|
||||
return GetCurrentThreadPointer();
|
||||
}
|
||||
}
|
||||
|
||||
/* Lock and look up in table. */
|
||||
KScopedDisableDispatch dd;
|
||||
KScopedSpinLock lk(this->lock);
|
||||
|
||||
KAutoObject *obj = this->GetObjectImpl(handle);
|
||||
if (obj->DynamicCast<KInterruptEvent *>() != nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
if constexpr (std::is_same<T, KAutoObject>::value) {
|
||||
return obj;
|
||||
} else {
|
||||
return obj->DynamicCast<T*>();
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE KScopedAutoObject<KAutoObject> GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
KScopedDisableDispatch dd;
|
||||
KScopedSpinLock lk(this->lock);
|
||||
|
||||
return this->GetObjectByIndexImpl(out_handle, index);
|
||||
}
|
||||
|
||||
NOINLINE Result Reserve(ams::svc::Handle *out_handle);
|
||||
NOINLINE void Unreserve(ams::svc::Handle handle);
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE Result Add(ams::svc::Handle *out_handle, T *obj) {
|
||||
static_assert(std::is_base_of<KAutoObject, T>::value);
|
||||
return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken());
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
ALWAYS_INLINE void Register(ams::svc::Handle handle, T *obj) {
|
||||
static_assert(std::is_base_of<KAutoObject, T>::value);
|
||||
return this->Add(handle, obj, obj->GetTypeObj().GetClassToken());
|
||||
}
|
||||
private:
|
||||
NOINLINE Result Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type);
|
||||
NOINLINE void Register(ams::svc::Handle handle, KAutoObject *obj, u16 type);
|
||||
|
||||
constexpr ALWAYS_INLINE Entry *AllocateEntry() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(this->count < this->table_size);
|
||||
|
||||
Entry *entry = this->free_head;
|
||||
this->free_head = entry->GetNextFreeEntry();
|
||||
|
||||
this->count++;
|
||||
this->max_count = std::max(this->max_count, this->count);
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void FreeEntry(Entry *entry) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(this->count > 0);
|
||||
|
||||
entry->SetFree(this->free_head);
|
||||
this->free_head = entry;
|
||||
|
||||
this->count--;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u16 AllocateLinearId() {
|
||||
const u16 id = this->next_linear_id++;
|
||||
if (this->next_linear_id > MaxLinearId) {
|
||||
this->next_linear_id = MinLinearId;
|
||||
}
|
||||
return id;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetEntryIndex(Entry *entry) {
|
||||
const size_t index = entry - this->table;
|
||||
MESOSPHERE_ASSERT(index < this->table_size);
|
||||
return index;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Entry *FindEntry(ams::svc::Handle handle) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Unpack the handle. */
|
||||
const auto handle_pack = GetHandleBitPack(handle);
|
||||
const auto raw_value = handle_pack.Get<HandleRawValue>();
|
||||
const auto index = handle_pack.Get<HandleIndex>();
|
||||
const auto linear_id = handle_pack.Get<HandleLinearId>();
|
||||
const auto reserved = handle_pack.Get<HandleReserved>();
|
||||
MESOSPHERE_ASSERT(reserved == 0);
|
||||
|
||||
/* Validate our indexing information. */
|
||||
if (raw_value == 0) {
|
||||
return nullptr;
|
||||
}
|
||||
if (linear_id == 0) {
|
||||
return nullptr;
|
||||
}
|
||||
if (index >= this->table_size) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/* Get the entry, and ensure our serial id is correct. */
|
||||
Entry *entry = std::addressof(this->table[index]);
|
||||
if (entry->GetObject() == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
if (entry->GetLinearId() != linear_id) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
constexpr NOINLINE KAutoObject *GetObjectImpl(ams::svc::Handle handle) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Handles must not have reserved bits set. */
|
||||
if (GetHandleBitPack(handle).Get<HandleReserved>() != 0) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (Entry *entry = this->FindEntry(handle); entry != nullptr) {
|
||||
return entry->GetObject();
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr NOINLINE KAutoObject *GetObjectByIndexImpl(ams::svc::Handle *out_handle, size_t index) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Index must be in bounds. */
|
||||
if (index >= this->table_size || this->table == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/* Ensure entry has an object. */
|
||||
Entry *entry = std::addressof(this->table[index]);
|
||||
if (entry->GetObject() == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
*out_handle = EncodeHandle(index, entry->GetLinearId());
|
||||
return entry->GetObject();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_k_spin_lock.hpp>
|
||||
#include <mesosphere/kern_k_timer_task.hpp>
|
||||
#include <mesosphere/kern_select_interrupt_manager.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
    /* Common machinery for the hardware timer: a spin-lock-protected red-black tree
     * of pending KTimerTasks ordered by their absolute fire time, plus a cached
     * pointer to the earliest task so the interrupt path can peek cheaply.
     * Invariant: next_task is the tree's minimum (or nullptr when empty); a task's
     * stored time is 0 iff it is not currently registered. */
    class KHardwareTimerBase {
        private:
            using TimerTaskTree = util::IntrusiveRedBlackTreeBaseTraits<KTimerTask>::TreeType<KTimerTask>;
        private:
            KSpinLock lock;
            TimerTaskTree task_tree;
            KTimerTask *next_task;
        public:
            constexpr ALWAYS_INLINE KHardwareTimerBase() : lock(), task_tree(), next_task(nullptr) { /* ... */ }
        private:
            /* Unlinks a task; caller must hold the lock and the task must be in the tree. */
            ALWAYS_INLINE void RemoveTaskFromTree(KTimerTask *task) {
                /* Erase from the tree. */
                auto it = this->task_tree.erase(this->task_tree.iterator_to(*task));

                /* Clear the task's scheduled time. */
                task->SetTime(0);

                /* Update our next task if relevant. */
                /* erase() returns the successor, which is the new minimum when we removed the old one. */
                if (this->next_task == task) {
                    this->next_task = (it != this->task_tree.end()) ? std::addressof(*it) : nullptr;
                }
            }
        public:
            /* Cancels a pending task; no-op if the task is not registered (time == 0). */
            NOINLINE void CancelTask(KTimerTask *task) {
                KScopedDisableDispatch dd;
                KScopedSpinLock lk(this->lock);

                if (const s64 task_time = task->GetTime(); task_time > 0) {
                    this->RemoveTaskFromTree(task);
                }
            }
        protected:
            ALWAYS_INLINE KSpinLock &GetLock() { return this->lock; }

            /* Fires every task due at or before cur_time; returns the next scheduled
               time, or 0 when no tasks remain. Caller must hold the lock. */
            ALWAYS_INLINE s64 DoInterruptTaskImpl(s64 cur_time) {
                /* We want to handle all tasks, returning the next time that a task is scheduled. */
                while (true) {
                    /* Get the next task. If there isn't one, return 0. */
                    KTimerTask *task = this->next_task;
                    if (task == nullptr) {
                        return 0;
                    }

                    /* If the task needs to be done in the future, do it in the future and not now. */
                    if (const s64 task_time = task->GetTime(); task_time > cur_time) {
                        return task_time;
                    }

                    /* Remove the task from the tree of tasks, and update our next task. */
                    this->RemoveTaskFromTree(task);

                    /* Handle the task. */
                    task->OnTimer();
                }
            }

            /* Inserts a task to fire at absolute time task_time. Returns true iff the
               new task became the earliest, i.e. the hardware deadline must be
               reprogrammed. Caller must hold the lock. */
            ALWAYS_INLINE bool RegisterAbsoluteTaskImpl(KTimerTask *task, s64 task_time) {
                MESOSPHERE_ASSERT(task_time > 0);

                /* Set the task's time, and insert it into our tree. */
                task->SetTime(task_time);
                this->task_tree.insert(*task);

                /* Update our next task if relevant. */
                if (this->next_task != nullptr && this->next_task->GetTime() <= task_time) {
                    return false;
                }
                this->next_task = task;
                return true;
            }
    };
|
||||
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_address_space_info.hpp>
|
||||
#include <mesosphere/kern_select_page_table.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
    /* In-memory view of a KIP1 ("Kernel Initial Process") binary header.
     * Member order and sizes mirror the on-disk layout exactly (0x100 bytes:
     * 0x80 of fields/padding followed by 0x80 bytes of kernel capabilities),
     * so this class must not be reordered or have members added. */
    class KInitialProcessHeader {
        private:
            static constexpr u32 Magic = util::FourCC<'K','I','P','1'>::Code;
        private:
            u32 magic;
            u8 name[12];                              /* Not necessarily NUL-terminated. */
            u64 program_id;
            u32 version;
            u8 priority;
            u8 ideal_core_id;
            u8 _1E;                                   /* Unused/reserved byte at offset 0x1E. */
            u8 flags;                                 /* Bit 0-2: rx/ro/rw compressed; bit 3: 64-bit; bit 4: 64-bit address space; bit 5: secure memory. */
            u32 rx_address;
            u32 rx_size;
            u32 rx_compressed_size;
            u32 affinity_mask;
            u32 ro_address;
            u32 ro_size;
            u32 ro_compressed_size;
            u32 stack_size;
            u32 rw_address;
            u32 rw_size;
            u32 rw_compressed_size;
            u32 _4C;                                  /* Unused/reserved word at offset 0x4C. */
            u32 bss_address;
            u32 bss_size;
            u32 pad[(0x80 - 0x58) / sizeof(u32)];
            u32 capabilities[0x80 / sizeof(u32)];
        public:
            /* A header is valid iff its magic is 'KIP1'. */
            constexpr bool IsValid() const { return this->magic == Magic; }

            /* Copies the (possibly unterminated) 12-byte name into dst, zero-filling
               the remainder so the result is always NUL-terminated when size > 12. */
            constexpr void GetName(char *dst, size_t size) const {
                std::memset(dst, 0, size);
                std::memcpy(dst, this->name, std::min(sizeof(this->name), size));
            }

            constexpr const u32 *GetCapabilities() const { return this->capabilities; }
            constexpr size_t GetNumCapabilities() const { return util::size(this->capabilities); }

            constexpr u64 GetProgramId() const { return this->program_id; }
            constexpr u32 GetVersion() const { return this->version; }
            constexpr u8 GetPriority() const { return this->priority; }
            constexpr u8 GetIdealCoreId() const { return this->ideal_core_id; }

            /* Flag accessors; bit assignments per the KIP1 format. */
            constexpr bool IsRxCompressed() const { return (this->flags & (1 << 0)); }
            constexpr bool IsRoCompressed() const { return (this->flags & (1 << 1)); }
            constexpr bool IsRwCompressed() const { return (this->flags & (1 << 2)); }
            constexpr bool Is64Bit() const { return (this->flags & (1 << 3)); }
            constexpr bool Is64BitAddressSpace() const { return (this->flags & (1 << 4)); }
            constexpr bool UsesSecureMemory() const { return (this->flags & (1 << 5)); }

            /* Segment addresses are offsets relative to the process image base. */
            constexpr u32 GetRxAddress() const { return this->rx_address; }
            constexpr u32 GetRxSize() const { return this->rx_size; }
            constexpr u32 GetRxCompressedSize() const { return this->rx_compressed_size; }
            constexpr u32 GetRoAddress() const { return this->ro_address; }
            constexpr u32 GetRoSize() const { return this->ro_size; }
            constexpr u32 GetRoCompressedSize() const { return this->ro_compressed_size; }
            constexpr u32 GetRwAddress() const { return this->rw_address; }
            constexpr u32 GetRwSize() const { return this->rw_size; }
            constexpr u32 GetRwCompressedSize() const { return this->rw_compressed_size; }
            constexpr u32 GetBssAddress() const { return this->bss_address; }
            constexpr u32 GetBssSize() const { return this->bss_size; }

            constexpr u32 GetAffinityMask() const { return this->affinity_mask; }
            constexpr u32 GetStackSize() const { return this->stack_size; }
    };
    static_assert(sizeof(KInitialProcessHeader) == 0x100);
|
||||
|
||||
    /* Non-owning reader over a KIP1 binary in memory. Attach() must succeed before
     * any other accessor is called; accessors do not re-validate the header. */
    class KInitialProcessReader {
        private:
            KInitialProcessHeader *kip_header;  /* Points into the attached binary; not owned. */
        public:
            constexpr KInitialProcessReader() : kip_header() { /* ... */ }

            constexpr const u32 *GetCapabilities() const { return this->kip_header->GetCapabilities(); }
            constexpr size_t GetNumCapabilities() const { return this->kip_header->GetNumCapabilities(); }

            /* Size of the on-disk binary: header plus the three (possibly compressed) segments. */
            constexpr size_t GetBinarySize() const {
                return sizeof(*kip_header) + this->kip_header->GetRxCompressedSize() + this->kip_header->GetRoCompressedSize() + this->kip_header->GetRwCompressedSize();
            }

            /* Size of the decompressed process image: end of .bss if present, else end of .rw. */
            constexpr size_t GetSize() const {
                if (const size_t bss_size = this->kip_header->GetBssSize(); bss_size != 0) {
                    return this->kip_header->GetBssAddress() + this->kip_header->GetBssSize();
                } else {
                    return this->kip_header->GetRwAddress() + this->kip_header->GetRwSize();
                }
            }

            constexpr u8 GetPriority() const { return this->kip_header->GetPriority(); }
            constexpr u8 GetIdealCoreId() const { return this->kip_header->GetIdealCoreId(); }
            constexpr u32 GetAffinityMask() const { return this->kip_header->GetAffinityMask(); }
            constexpr u32 GetStackSize() const { return this->kip_header->GetStackSize(); }

            constexpr bool Is64Bit() const { return this->kip_header->Is64Bit(); }
            constexpr bool Is64BitAddressSpace() const { return this->kip_header->Is64BitAddressSpace(); }
            constexpr bool UsesSecureMemory() const { return this->kip_header->UsesSecureMemory(); }

            /* Binds this reader to bin if it carries a valid KIP1 magic; returns
               whether the attach succeeded. bin must outlive this reader. */
            bool Attach(u8 *bin) {
                if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) {
                    this->kip_header = header;
                    return true;
                } else {
                    return false;
                }
            }

            /* Fills out a CreateProcessParameter from the header (optionally randomizing layout). */
            Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
            /* Decompresses/copies segments into the target address space. */
            Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const;
            /* Applies final R-X / R-- / RW- permissions to the loaded segments. */
            Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const;
    };
|
||||
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_readable_event.hpp>
|
||||
#include <mesosphere/kern_k_interrupt_task.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KInterruptEventTask;
|
||||
|
||||
    /* Readable event signaled by a hardware interrupt (svc::CreateInterruptEvent).
       Slab-allocated and registered in the global object container. */
    class KInterruptEvent final : public KAutoObjectWithSlabHeapAndContainer<KInterruptEvent, KReadableEvent> {
        MESOSPHERE_AUTOOBJECT_TRAITS(KInterruptEvent, KReadableEvent);
        public:
            /* TODO: This is a placeholder definition. */
    };
|
||||
|
||||
    /* Interrupt-context task that signals the KInterruptEvent bound to an interrupt id. */
    class KInterruptEventTask : public KSlabAllocated<KInterruptEventTask>, public KInterruptTask {
        public:
            /* TODO: This is a placeholder definition. */
    };
|
||||
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KInterruptTask;
|
||||
|
||||
    /* Interface for objects that service a hardware interrupt. OnInterrupt runs in
       interrupt context and returns a task to defer to the interrupt task manager
       (or nullptr when no deferred work is needed). */
    class KInterruptHandler {
        public:
            virtual KInterruptTask *OnInterrupt(s32 interrupt_id) = 0;
    };
|
||||
|
||||
    /* A deferrable unit of interrupt work. Doubles as its own singly-linked queue
       node via next_task, which the interrupt task manager uses; DoTask() runs
       later in thread context. */
    class KInterruptTask : public KInterruptHandler {
        private:
            KInterruptTask *next_task;  /* Intrusive queue link; owned by the task manager while enqueued. */
        public:
            constexpr ALWAYS_INLINE KInterruptTask() : next_task(nullptr) { /* ... */ }

            constexpr ALWAYS_INLINE KInterruptTask *GetNextTask() const {
                return this->next_task;
            }

            constexpr ALWAYS_INLINE void SetNextTask(KInterruptTask *t) {
                this->next_task = t;
            }

            /* The deferred work; executed by the interrupt task manager's thread. */
            virtual void DoTask() = 0;
    };
|
||||
|
||||
    /* Sentinel task pointer (address 1, never dereferenced) used to mark
       "interrupt handled, but no real task to enqueue". */
    static ALWAYS_INLINE KInterruptTask *GetDummyInterruptTask() {
        return reinterpret_cast<KInterruptTask *>(1);
    }
|
||||
|
||||
}
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_k_interrupt_task.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KThread;
|
||||
|
||||
    /* Per-core manager that collects KInterruptTasks enqueued from interrupt
       context and runs their DoTask() on a dedicated kernel thread. */
    class KInterruptTaskManager {
        private:
            /* Intrusive FIFO over KInterruptTask's next_task link. */
            class TaskQueue {
                private:
                    KInterruptTask *head;
                    KInterruptTask *tail;
                public:
                    constexpr TaskQueue() : head(nullptr), tail(nullptr) { /* ... */ }

                    constexpr KInterruptTask *GetHead() { return this->head; }
                    constexpr bool IsEmpty() const { return this->head == nullptr; }
                    constexpr void Clear() { this->head = nullptr; this->tail = nullptr; }

                    void Enqueue(KInterruptTask *task);
                    void Dequeue();
            };
        private:
            TaskQueue task_queue;
            KThread *thread;    /* The worker thread that drains task_queue. */
        private:
            /* Thread entry point; arg is the KInterruptTaskManager instance. */
            static void ThreadFunction(uintptr_t arg);
            void ThreadFunctionImpl();
        public:
            constexpr KInterruptTaskManager() : task_queue(), thread(nullptr) { /* ... */ }

            constexpr KThread *GetThread() const { return this->thread; }

            NOINLINE void Initialize();
            void EnqueueTask(KInterruptTask *task);

            /* TODO: Actually implement KInterruptTaskManager. This is a placeholder. */
    };
|
||||
|
||||
}
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_light_lock.hpp>
|
||||
#include <mesosphere/kern_k_thread_queue.hpp>
|
||||
#include <mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
    /* Condition variable for use with KLightLock. Wait() atomically releases the
     * lock and sleeps under the scheduler lock, then re-acquires the lock after
     * waking; Broadcast() wakes every waiter. */
    class KLightConditionVariable {
        private:
            KThreadQueue thread_queue;
        public:
            constexpr ALWAYS_INLINE KLightConditionVariable() : thread_queue() { /* ... */ }
        private:
            /* Releases *lock and sleeps (optionally with timeout). The unlock happens
               inside the scheduler-lock-and-sleep scope, so no wakeup can be lost
               between the unlock and the sleep. Does NOT re-acquire the lock. */
            void WaitImpl(KLightLock *lock, s64 timeout) {
                KThread *owner = GetCurrentThreadPointer();
                KHardwareTimer *timer;

                /* Sleep the thread. */
                {
                    KScopedSchedulerLockAndSleep lk(&timer, owner, timeout);
                    lock->Unlock();

                    if (!this->thread_queue.SleepThread(owner)) {
                        lk.CancelSleep();
                        return;
                    }
                }

                /* Cancel the task that the sleep setup. */
                /* (Only non-null when a finite timeout armed the hardware timer.) */
                if (timer != nullptr) {
                    timer->CancelTask(owner);
                }
            }
        public:
            /* Standard condvar wait: caller must hold *lock; holds it again on return.
               timeout < 0 means wait forever. */
            void Wait(KLightLock *lock, s64 timeout = -1ll) {
                this->WaitImpl(lock, timeout);
                lock->Lock();
            }

            /* Wakes all waiting threads. */
            void Broadcast() {
                KScopedSchedulerLock lk;
                while (this->thread_queue.WakeupFrontThread() != nullptr) {
                    /* We want to signal all threads, and so should continue waking up until there's nothing to wake. */
                }
            }

    };
|
||||
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_current_context.hpp>
|
||||
#include <mesosphere/kern_k_scoped_lock.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KLightLock {
|
||||
private:
|
||||
std::atomic<uintptr_t> tag;
|
||||
public:
|
||||
constexpr KLightLock() : tag(0) { /* ... */ }
|
||||
|
||||
void Lock() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
|
||||
|
||||
while (true) {
|
||||
uintptr_t old_tag = this->tag.load(std::memory_order_relaxed);
|
||||
|
||||
while (!this->tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) {
|
||||
/* ... */
|
||||
}
|
||||
|
||||
if ((old_tag == 0) || ((old_tag | 1) == (cur_thread | 1))) {
|
||||
break;
|
||||
}
|
||||
|
||||
this->LockSlowPath(old_tag | 1, cur_thread);
|
||||
}
|
||||
}
|
||||
|
||||
void Unlock() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
|
||||
uintptr_t expected = cur_thread;
|
||||
if (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release)) {
|
||||
this->UnlockSlowPath(cur_thread);
|
||||
}
|
||||
}
|
||||
|
||||
void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
|
||||
void UnlockSlowPath(uintptr_t cur_thread);
|
||||
|
||||
bool IsLocked() const { return this->tag != 0; }
|
||||
bool IsLockedByCurrentThread() const { return (this->tag | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
|
||||
};
|
||||
|
||||
using KScopedLightLock = KScopedLock<KLightLock>;
|
||||
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
    /* Lightweight IPC session (svc::CreateLightSession); slab-allocated and
       registered in the global object container. */
    class KLightSession final : public KAutoObjectWithSlabHeapAndContainer<KLightSession, KAutoObjectWithList> {
        MESOSPHERE_AUTOOBJECT_TRAITS(KLightSession, KAutoObject);
        public:
            /* TODO: This is a placeholder definition. */
    };
|
||||
|
||||
}
|
|
@ -0,0 +1,226 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
    /* Slab-allocated list node holding an untyped item pointer; KLinkedList<T>
       uses these so arbitrary objects can be listed without embedding a link. */
    class KLinkedListNode : public util::IntrusiveListBaseNode<KLinkedListNode>, public KSlabAllocated<KLinkedListNode> {
        private:
            void *item;  /* The listed object; ownership remains with the caller. */
        public:
            constexpr KLinkedListNode() : util::IntrusiveListBaseNode<KLinkedListNode>(), item(nullptr) { MESOSPHERE_ASSERT_THIS(); }

            constexpr void Initialize(void *it) {
                MESOSPHERE_ASSERT_THIS();
                this->item = it;
            }

            constexpr void *GetItem() const {
                return this->item;
            }
    };
    /* Layout check: exactly one list node plus one pointer, no padding. */
    static_assert(sizeof(KLinkedListNode) == sizeof(util::IntrusiveListNode) + sizeof(void *));
|
||||
|
||||
/* A doubly-linked list of T in which each element is referenced by a slab-allocated
   KLinkedListNode rather than being intrusive itself. insert() allocates a node from
   the KLinkedListNode slab heap and stores a pointer to the element; erase() frees
   the node. The list never owns or copies the elements themselves. */
template<typename T>
class KLinkedList : private util::IntrusiveListBaseTraits<KLinkedListNode>::ListType {
    private:
        using BaseList = util::IntrusiveListBaseTraits<KLinkedListNode>::ListType;
    public:
        template<bool Const>
        class Iterator;

        using value_type             = T;
        using size_type              = size_t;
        using difference_type        = ptrdiff_t;
        using pointer                = value_type *;
        using const_pointer          = const value_type *;
        using reference              = value_type &;
        using const_reference        = const value_type &;
        using iterator               = Iterator<false>;
        using const_iterator         = Iterator<true>;
        using reverse_iterator       = std::reverse_iterator<iterator>;
        using const_reverse_iterator = std::reverse_iterator<const_iterator>;

        /* Bidirectional iterator wrapping the underlying node iterator, exposing the
           node's payload as a (possibly const) T. */
        template<bool Const>
        class Iterator {
            private:
                using BaseIterator = BaseList::Iterator<Const>;
                friend class KLinkedList;
            public:
                using iterator_category = std::bidirectional_iterator_tag;
                using value_type        = typename KLinkedList::value_type;
                using difference_type   = typename KLinkedList::difference_type;
                using pointer           = typename std::conditional<Const, KLinkedList::const_pointer, KLinkedList::pointer>::type;
                using reference         = typename std::conditional<Const, KLinkedList::const_reference, KLinkedList::reference>::type;
            private:
                /* Iterator over the node list; the payload is recovered from the node. */
                BaseIterator base_it;
            public:
                explicit Iterator(BaseIterator it) : base_it(it) { /* ... */ }

                /* Recover the typed element pointer from the current node. */
                pointer GetItem() const {
                    return static_cast<pointer>(this->base_it->GetItem());
                }

                bool operator==(const Iterator &rhs) const {
                    return this->base_it == rhs.base_it;
                }

                bool operator!=(const Iterator &rhs) const {
                    return !(*this == rhs);
                }

                pointer operator->() const {
                    return this->GetItem();
                }

                reference operator*() const {
                    return *this->GetItem();
                }

                Iterator &operator++() {
                    ++this->base_it;
                    return *this;
                }

                Iterator &operator--() {
                    --this->base_it;
                    return *this;
                }

                Iterator operator++(int) {
                    const Iterator it{*this};
                    ++(*this);
                    return it;
                }

                Iterator operator--(int) {
                    const Iterator it{*this};
                    --(*this);
                    return it;
                }

                /* Allow implicit conversion from a mutable iterator to a const iterator. */
                operator Iterator<true>() const {
                    return Iterator<true>(this->base_it);
                }
        };
    public:
        constexpr KLinkedList() : BaseList() { /* ... */ }

        /* Iterator accessors. */
        iterator begin() {
            return iterator(BaseList::begin());
        }

        const_iterator begin() const {
            return const_iterator(BaseList::begin());
        }

        iterator end() {
            return iterator(BaseList::end());
        }

        const_iterator end() const {
            return const_iterator(BaseList::end());
        }

        const_iterator cbegin() const {
            return this->begin();
        }

        const_iterator cend() const {
            return this->end();
        }

        reverse_iterator rbegin() {
            return reverse_iterator(this->end());
        }

        const_reverse_iterator rbegin() const {
            return const_reverse_iterator(this->end());
        }

        reverse_iterator rend() {
            return reverse_iterator(this->begin());
        }

        const_reverse_iterator rend() const {
            return const_reverse_iterator(this->begin());
        }

        const_reverse_iterator crbegin() const {
            return this->rbegin();
        }

        const_reverse_iterator crend() const {
            return this->rend();
        }

        /* Content management. */
        using BaseList::empty;
        using BaseList::size;

        reference back() {
            return *(--this->end());
        }

        const_reference back() const {
            return *(--this->end());
        }

        reference front() {
            return *this->begin();
        }

        const_reference front() const {
            return *this->begin();
        }

        /* Insert ref before pos. Allocates a node from the slab; aborts the kernel if
           the slab is exhausted. The element itself is not copied. */
        iterator insert(const_iterator pos, reference ref) {
            KLinkedListNode *node = KLinkedListNode::Allocate();
            MESOSPHERE_ABORT_UNLESS(node != nullptr);
            node->Initialize(std::addressof(ref));
            return iterator(BaseList::insert(pos.base_it, *node));
        }

        void push_back(reference ref) {
            this->insert(this->end(), ref);
        }

        void push_front(reference ref) {
            this->insert(this->begin(), ref);
        }

        void pop_back() {
            this->erase(--this->end());
        }

        void pop_front() {
            this->erase(this->begin());
        }

        /* Unlink the node at pos and return it to the slab. The referenced element is
           untouched; only the node is freed. */
        iterator erase(const iterator pos) {
            /* Capture the node before erasing, since erase invalidates pos. */
            KLinkedListNode *freed_node = std::addressof(*pos.base_it);
            iterator ret = iterator(BaseList::erase(pos.base_it));
            KLinkedListNode::Free(freed_node);

            return ret;
        }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,360 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Kernel-internal memory state. The low 8 bits hold the ams::svc::MemoryState value
   reported to userland; the bits above are capability flags describing what operations
   the kernel permits on memory in this state. The exact composite values are pinned by
   the static_asserts below to match the official kernel's constants. */
enum KMemoryState : u32 {
    KMemoryState_None = 0,
    KMemoryState_Mask = 0xFF, /* Mask extracting the svc-visible state. */
    KMemoryState_All  = ~KMemoryState_None,

    /* Per-operation capability flags. */
    KMemoryState_FlagCanReprotect           = (1 <<  8),
    KMemoryState_FlagCanDebug               = (1 <<  9),
    KMemoryState_FlagCanUseIpc              = (1 << 10),
    KMemoryState_FlagCanUseNonDeviceIpc     = (1 << 11),
    KMemoryState_FlagCanUseNonSecureIpc     = (1 << 12),
    KMemoryState_FlagMapped                 = (1 << 13),
    KMemoryState_FlagCode                   = (1 << 14),
    KMemoryState_FlagCanAlias               = (1 << 15),
    KMemoryState_FlagCanCodeAlias           = (1 << 16),
    KMemoryState_FlagCanTransfer            = (1 << 17),
    KMemoryState_FlagCanQueryPhysical       = (1 << 18),
    KMemoryState_FlagCanDeviceMap           = (1 << 19),
    KMemoryState_FlagCanAlignedDeviceMap    = (1 << 20),
    KMemoryState_FlagCanIpcUserBuffer       = (1 << 21),
    KMemoryState_FlagReferenceCounted       = (1 << 22),
    KMemoryState_FlagCanMapProcess          = (1 << 23),
    KMemoryState_FlagCanChangeAttribute     = (1 << 24),
    KMemoryState_FlagCanCodeMemory          = (1 << 25),

    /* Common flag combinations for data, code, and miscellaneous mappings. */
    KMemoryState_FlagsData = KMemoryState_FlagCanReprotect      | KMemoryState_FlagCanUseIpc          |
                             KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
                             KMemoryState_FlagMapped             | KMemoryState_FlagCanAlias           |
                             KMemoryState_FlagCanTransfer        | KMemoryState_FlagCanQueryPhysical   |
                             KMemoryState_FlagCanDeviceMap       | KMemoryState_FlagCanAlignedDeviceMap |
                             KMemoryState_FlagCanIpcUserBuffer   | KMemoryState_FlagReferenceCounted   |
                             KMemoryState_FlagCanChangeAttribute,

    KMemoryState_FlagsCode = KMemoryState_FlagCanDebug           | KMemoryState_FlagCanUseIpc          |
                             KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
                             KMemoryState_FlagMapped             | KMemoryState_FlagCode               |
                             KMemoryState_FlagCanQueryPhysical   | KMemoryState_FlagCanDeviceMap       |
                             KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted,

    KMemoryState_FlagsMisc = KMemoryState_FlagMapped             | KMemoryState_FlagReferenceCounted   |
                             KMemoryState_FlagCanQueryPhysical   | KMemoryState_FlagCanDeviceMap,


    /* Composite states: svc-visible state value | capability flags. */
    KMemoryState_Free             = ams::svc::MemoryState_Free,
    KMemoryState_Io               = ams::svc::MemoryState_Io               | KMemoryState_FlagMapped,
    KMemoryState_Static           = ams::svc::MemoryState_Static           | KMemoryState_FlagMapped | KMemoryState_FlagCanQueryPhysical,
    KMemoryState_Code             = ams::svc::MemoryState_Code             | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess,
    KMemoryState_CodeData         = ams::svc::MemoryState_CodeData         | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory,
    KMemoryState_Normal           = ams::svc::MemoryState_Normal           | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory,
    KMemoryState_Shared           = ams::svc::MemoryState_Shared           | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,

    /* KMemoryState_Alias was removed after 1.0.0. */

    KMemoryState_AliasCode        = ams::svc::MemoryState_AliasCode        | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias,
    KMemoryState_AliasCodeData    = ams::svc::MemoryState_AliasCodeData    | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory,


    KMemoryState_Ipc              = ams::svc::MemoryState_Ipc              | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                                                                           | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

    KMemoryState_Stack            = ams::svc::MemoryState_Stack            | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                                                                           | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

    KMemoryState_ThreadLocal      = ams::svc::MemoryState_ThreadLocal      | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,

    KMemoryState_Transfered       = ams::svc::MemoryState_Transfered       | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute
                                                                           | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

    KMemoryState_SharedTransfered = ams::svc::MemoryState_SharedTransfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                                                                           | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

    KMemoryState_SharedCode       = ams::svc::MemoryState_SharedCode       | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted
                                                                           | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

    KMemoryState_Inaccessible     = ams::svc::MemoryState_Inaccessible,

    KMemoryState_NonSecureIpc     = ams::svc::MemoryState_NonSecureIpc     | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                                                                           | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

    KMemoryState_NonDeviceIpc     = ams::svc::MemoryState_NonDeviceIpc     | KMemoryState_FlagsMisc | KMemoryState_FlagCanUseNonDeviceIpc,


    KMemoryState_Kernel           = ams::svc::MemoryState_Kernel           | KMemoryState_FlagMapped,

    KMemoryState_GeneratedCode    = ams::svc::MemoryState_GeneratedCode    | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug,
    KMemoryState_CodeOut          = ams::svc::MemoryState_CodeOut          | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
};

/* Pin the composite values against the official kernel's constants; a change to any
   flag bit or svc state value will be caught here at compile time. */
#if 1
    static_assert(KMemoryState_Free             == 0x00000000);
    static_assert(KMemoryState_Io               == 0x00002001);
    static_assert(KMemoryState_Static           == 0x00042002);
    static_assert(KMemoryState_Code             == 0x00DC7E03);
    static_assert(KMemoryState_CodeData         == 0x03FEBD04);
    static_assert(KMemoryState_Normal           == 0x037EBD05);
    static_assert(KMemoryState_Shared           == 0x00402006);

    static_assert(KMemoryState_AliasCode        == 0x00DD7E08);
    static_assert(KMemoryState_AliasCodeData    == 0x03FFBD09);
    static_assert(KMemoryState_Ipc              == 0x005C3C0A);
    static_assert(KMemoryState_Stack            == 0x005C3C0B);
    static_assert(KMemoryState_ThreadLocal      == 0x0040200C);
    static_assert(KMemoryState_Transfered       == 0x015C3C0D);
    static_assert(KMemoryState_SharedTransfered == 0x005C380E);
    static_assert(KMemoryState_SharedCode       == 0x0040380F);
    static_assert(KMemoryState_Inaccessible     == 0x00000010);
    static_assert(KMemoryState_NonSecureIpc     == 0x005C3811);
    static_assert(KMemoryState_NonDeviceIpc     == 0x004C2812);
    static_assert(KMemoryState_Kernel           == 0x00002013);
    static_assert(KMemoryState_GeneratedCode    == 0x00402214);
    static_assert(KMemoryState_CodeOut          == 0x00402015);
#endif
|
||||
|
||||
/* Memory permissions, packing user and kernel R/W/X into one byte. The low three bits
   are the svc-visible user permissions; the kernel's own permissions occupy the same
   bit pattern shifted left by KMemoryPermission_KernelShift. Note that UserRead and
   UserWrite imply the corresponding kernel permission, while UserExecute does not. */
enum KMemoryPermission : u8 {
    KMemoryPermission_None              = 0,
    KMemoryPermission_All               = static_cast<u8>(~KMemoryPermission_None),

    /* Kernel permissions live three bits above the user permissions. */
    KMemoryPermission_KernelShift       = 3,

    KMemoryPermission_KernelRead        = ams::svc::MemoryPermission_Read    << KMemoryPermission_KernelShift,
    KMemoryPermission_KernelWrite       = ams::svc::MemoryPermission_Write   << KMemoryPermission_KernelShift,
    KMemoryPermission_KernelExecute     = ams::svc::MemoryPermission_Execute << KMemoryPermission_KernelShift,

    KMemoryPermission_KernelReadWrite   = KMemoryPermission_KernelRead | KMemoryPermission_KernelWrite,
    KMemoryPermission_KernelReadExecute = KMemoryPermission_KernelRead | KMemoryPermission_KernelExecute,

    /* User read/write also grant the kernel the same access; user execute does not. */
    KMemoryPermission_UserRead          = ams::svc::MemoryPermission_Read    | KMemoryPermission_KernelRead,
    KMemoryPermission_UserWrite         = ams::svc::MemoryPermission_Write   | KMemoryPermission_KernelWrite,
    KMemoryPermission_UserExecute       = ams::svc::MemoryPermission_Execute,

    KMemoryPermission_UserReadWrite     = KMemoryPermission_UserRead | KMemoryPermission_UserWrite,
    KMemoryPermission_UserReadExecute   = KMemoryPermission_UserRead | KMemoryPermission_UserExecute,

    /* Mask of the svc-visible (user) permission bits. */
    KMemoryPermission_UserMask          = ams::svc::MemoryPermission_Read | ams::svc::MemoryPermission_Write | ams::svc::MemoryPermission_Execute,
};
|
||||
|
||||
/* Convert an svc-visible permission into the kernel's packed representation.
   The kernel always retains read access, and mirrors the user's write bit into
   the kernel-write position; user execute grants the kernel nothing extra. */
constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
    /* Preserve the user-visible R/W/X bits exactly as requested. */
    const auto user_bits   = perm & KMemoryPermission_UserMask;
    /* Kernel read is unconditional; kernel write tracks user write. */
    const auto kernel_bits = KMemoryPermission_KernelRead | ((perm & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift);
    return static_cast<KMemoryPermission>(user_bits | kernel_bits);
}
|
||||
|
||||
/* Per-block memory attributes; values mirror ams::svc::MemoryAttribute. The top bit
   is a "don't care" marker used when matching attributes, never a real attribute. */
enum KMemoryAttribute : u8 {
    KMemoryAttribute_None         = 0x00,
    KMemoryAttribute_Mask         = 0x7F, /* All real attribute bits. */
    KMemoryAttribute_All          = KMemoryAttribute_Mask,
    KMemoryAttribute_DontCareMask = 0x80, /* Wildcard bit for attribute comparisons. */

    KMemoryAttribute_Locked       = ams::svc::MemoryAttribute_Locked,
    KMemoryAttribute_IpcLocked    = ams::svc::MemoryAttribute_IpcLocked,
    KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared,
    KMemoryAttribute_Uncached     = ams::svc::MemoryAttribute_Uncached,
};

/* The don't-care bit must be disjoint from real attributes, and together they must
   cover the full u8 range. */
static_assert((KMemoryAttribute_Mask & KMemoryAttribute_DontCareMask) == 0);
static_assert(static_cast<typename std::underlying_type<KMemoryAttribute>::type>(~(KMemoryAttribute_Mask | KMemoryAttribute_DontCareMask)) == 0);
|
||||
|
||||
/* Snapshot of a memory block's properties, convertible to the svc::MemoryInfo
   structure returned by svcQueryMemory. Addresses/sizes are in bytes. */
struct KMemoryInfo {
    uintptr_t address;
    size_t size;
    KMemoryState state;
    KMemoryPermission perm;
    KMemoryAttribute attribute;
    /* Permission before an IPC lock replaced it; None when not locked. */
    KMemoryPermission original_perm;
    u16 ipc_lock_count;
    u16 device_use_count;

    /* Build the userland-visible MemoryInfo, masking off kernel-internal state flags,
       the don't-care attribute bit, and kernel-only permission bits. */
    constexpr ams::svc::MemoryInfo GetSvcMemoryInfo() const {
        return {
            .addr            = this->address,
            .size            = this->size,
            .state           = static_cast<ams::svc::MemoryState>(this->state & KMemoryState_Mask),
            .attr            = static_cast<ams::svc::MemoryAttribute>(this->attribute & KMemoryAttribute_Mask),
            .perm            = static_cast<ams::svc::MemoryPermission>(this->perm & KMemoryPermission_UserMask),
            .ipc_refcount    = this->ipc_lock_count,
            .device_refcount = this->device_use_count,
        };
    }

    constexpr uintptr_t GetAddress() const {
        return this->address;
    }

    constexpr size_t GetSize() const {
        return this->size;
    }

    constexpr size_t GetNumPages() const {
        return this->GetSize() / PageSize;
    }

    /* One past the last byte of the region. */
    constexpr uintptr_t GetEndAddress() const {
        return this->GetAddress() + this->GetSize();
    }

    /* Address of the last byte of the region (inclusive). */
    constexpr uintptr_t GetLastAddress() const {
        return this->GetEndAddress() - 1;
    }
};
|
||||
|
||||
class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
|
||||
private:
|
||||
KProcessAddress address;
|
||||
size_t num_pages;
|
||||
KMemoryState memory_state;
|
||||
u16 ipc_lock_count;
|
||||
u16 device_use_count;
|
||||
KMemoryPermission perm;
|
||||
KMemoryPermission original_perm;
|
||||
KMemoryAttribute attribute;
|
||||
public:
|
||||
static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
|
||||
if (lhs.GetAddress() < rhs.GetAddress()) {
|
||||
return -1;
|
||||
} else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
public:
|
||||
constexpr KProcessAddress GetAddress() const {
|
||||
return this->address;
|
||||
}
|
||||
|
||||
constexpr size_t GetNumPages() const {
|
||||
return this->num_pages;
|
||||
}
|
||||
|
||||
constexpr size_t GetSize() const {
|
||||
return this->GetNumPages() * PageSize;
|
||||
}
|
||||
|
||||
constexpr KProcessAddress GetEndAddress() const {
|
||||
return this->GetAddress() + this->GetSize();
|
||||
}
|
||||
|
||||
constexpr KProcessAddress GetLastAddress() const {
|
||||
return this->GetEndAddress() - 1;
|
||||
}
|
||||
|
||||
constexpr KMemoryInfo GetMemoryInfo() const {
|
||||
return {
|
||||
.address = GetInteger(this->GetAddress()),
|
||||
.size = this->GetSize(),
|
||||
.state = this->memory_state,
|
||||
.perm = this->perm,
|
||||
.attribute = this->attribute,
|
||||
.original_perm = this->original_perm,
|
||||
.ipc_lock_count = this->ipc_lock_count,
|
||||
.device_use_count = this->device_use_count,
|
||||
};
|
||||
}
|
||||
public:
|
||||
constexpr KMemoryBlock()
|
||||
: address(), num_pages(), memory_state(KMemoryState_None), ipc_lock_count(), device_use_count(), perm(), original_perm(), attribute()
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
|
||||
constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
|
||||
: address(addr), num_pages(np), memory_state(ms), ipc_lock_count(0), device_use_count(0), perm(p), original_perm(KMemoryPermission_None), attribute(attr)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
|
||||
constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
this->address = addr;
|
||||
this->num_pages = np;
|
||||
this->memory_state = ms;
|
||||
this->ipc_lock_count = 0;
|
||||
this->device_use_count = 0;
|
||||
this->perm = p;
|
||||
this->original_perm = KMemoryPermission_None;
|
||||
this->attribute = attr;
|
||||
}
|
||||
|
||||
constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
constexpr auto AttributeIgnoreMask = KMemoryAttribute_DontCareMask | KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
|
||||
return this->memory_state == s && this->perm == p && (this->attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
|
||||
}
|
||||
|
||||
constexpr bool HasSameProperties(const KMemoryBlock &rhs) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
return this->memory_state == rhs.memory_state &&
|
||||
this->perm == rhs.perm &&
|
||||
this->original_perm == rhs.original_perm &&
|
||||
this->attribute == rhs.attribute &&
|
||||
this->ipc_lock_count == rhs.ipc_lock_count &&
|
||||
this->device_use_count == rhs.device_use_count;
|
||||
}
|
||||
|
||||
constexpr bool Contains(KProcessAddress addr) const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
return this->GetAddress() <= addr && addr <= this->GetEndAddress();
|
||||
}
|
||||
|
||||
constexpr void Add(size_t np) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(np > 0);
|
||||
MESOSPHERE_ASSERT(this->GetAddress() + np * PageSize - 1 < this->GetEndAddress() + np * PageSize - 1);
|
||||
|
||||
this->num_pages += np;
|
||||
}
|
||||
|
||||
constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(this->original_perm == KMemoryPermission_None);
|
||||
MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == 0);
|
||||
|
||||
this->memory_state = s;
|
||||
this->perm = p;
|
||||
this->attribute = static_cast<KMemoryAttribute>(a | (this->attribute & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)));
|
||||
}
|
||||
|
||||
constexpr void Split(KMemoryBlock *block, KProcessAddress addr) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(this->GetAddress() < addr);
|
||||
MESOSPHERE_ASSERT(this->Contains(addr));
|
||||
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize));
|
||||
|
||||
block->address = this->address;
|
||||
block->num_pages = (addr - this->GetAddress()) / PageSize;
|
||||
block->memory_state = this->memory_state;
|
||||
block->ipc_lock_count = this->ipc_lock_count;
|
||||
block->device_use_count = this->device_use_count;
|
||||
block->perm = this->perm;
|
||||
block->original_perm = this->original_perm;
|
||||
block->attribute = this->attribute;
|
||||
|
||||
this->address = addr;
|
||||
this->num_pages -= block->num_pages;
|
||||
}
|
||||
};
|
||||
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
|
||||
|
||||
}
|
|
@ -0,0 +1,124 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_memory_block.hpp>
|
||||
#include <mesosphere/kern_k_page_table_manager.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Pre-allocates the (at most NumBlocks) KMemoryBlocks a single block-manager update
   can require, so the update itself cannot fail mid-way on allocation. Blocks not
   consumed via Allocate() are returned to the slab by the destructor. */
class KMemoryBlockManagerUpdateAllocator {
    public:
        /* An update can split at most once at each end of the range. */
        static constexpr size_t NumBlocks = 2;
    private:
        KMemoryBlock *blocks[NumBlocks];
        /* Index of the next unconsumed pre-allocated block. */
        size_t index;
        KMemoryBlockSlabManager *slab_manager;
        Result result;
    public:
        /* Attempt to reserve all NumBlocks up front; on any failure, result stays
           svc::ResultOutOfResource and any blocks already taken are freed by ~. */
        explicit KMemoryBlockManagerUpdateAllocator(KMemoryBlockSlabManager *sm) : blocks(), index(), slab_manager(sm), result(svc::ResultOutOfResource()) {
            for (size_t i = 0; i < NumBlocks; i++) {
                this->blocks[i] = this->slab_manager->Allocate();
                if (this->blocks[i] == nullptr) {
                    this->result = svc::ResultOutOfResource();
                    return;
                }
            }

            this->result = ResultSuccess();
        }

        /* Return every block that was never handed out. */
        ~KMemoryBlockManagerUpdateAllocator() {
            for (size_t i = 0; i < NumBlocks; i++) {
                if (this->blocks[i] != nullptr) {
                    this->slab_manager->Free(this->blocks[i]);
                }
            }
        }

        /* Whether the up-front reservation succeeded; must be checked before use. */
        Result GetResult() const {
            return this->result;
        }

        /* Hand out the next pre-reserved block, transferring ownership to the caller. */
        KMemoryBlock *Allocate() {
            MESOSPHERE_ABORT_UNLESS(this->index < NumBlocks);
            MESOSPHERE_ABORT_UNLESS(this->blocks[this->index] != nullptr);
            KMemoryBlock *block = nullptr;
            std::swap(block, this->blocks[this->index++]);
            return block;
        }

        /* Take back an unneeded block: re-slot it LIFO if any Allocate() slot is open,
           otherwise return it straight to the slab. */
        void Free(KMemoryBlock *block) {
            MESOSPHERE_ABORT_UNLESS(this->index <= NumBlocks);
            MESOSPHERE_ABORT_UNLESS(block != nullptr);
            if (this->index == 0) {
                this->slab_manager->Free(block);
            } else {
                this->blocks[--this->index] = block;
            }
        }
};
|
||||
|
||||
/* Manages a process address range [start_address, end_address) as a red-black tree of
   KMemoryBlocks, ordered by address. Updates allocate nodes through a
   KMemoryBlockManagerUpdateAllocator so they cannot fail part-way. */
class KMemoryBlockManager {
    public:
        using MemoryBlockTree = util::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
        using iterator = MemoryBlockTree::iterator;
        using const_iterator = MemoryBlockTree::const_iterator;
    private:
        MemoryBlockTree memory_block_tree;
        KProcessAddress start_address;
        KProcessAddress end_address;
    public:
        constexpr KMemoryBlockManager() : memory_block_tree(), start_address(), end_address() { /* ... */ }

        iterator end() { return this->memory_block_tree.end(); }
        const_iterator end() const { return this->memory_block_tree.end(); }
        const_iterator cend() const { return this->memory_block_tree.cend(); }

        /* Set the managed range [st, nd) and create the initial free block covering it. */
        Result Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager);
        /* Tear down all blocks, returning them to the given slab manager. */
        void Finalize(KMemoryBlockSlabManager *slab_manager);

        /* Search the region for a gap of num_pages pages satisfying the given
           alignment/offset, keeping guard_pages of clearance on each side. */
        KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;

        /* Set state/perm/attr on [address, address + num_pages * PageSize), splitting
           and coalescing blocks as needed; allocator supplies any new nodes. */
        void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);

        /* Find the block containing address; relies on KMemoryBlock::Compare treating
           overlapping ranges as equal, so a 1-page probe block suffices as the key. */
        iterator FindIterator(KProcessAddress address) const {
            return this->memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None));
        }

        /* As FindIterator, but returns nullptr when no block contains address. */
        const KMemoryBlock *FindBlock(KProcessAddress address) const {
            if (const_iterator it = this->FindIterator(address); it != this->memory_block_tree.end()) {
                return std::addressof(*it);
            }

            return nullptr;
        }

        /* Debug. */
        bool CheckState() const;
        void DumpBlocks() const;
};
|
||||
|
||||
/* RAII helper that audits a KMemoryBlockManager's invariants (via CheckState) on
   both entry and exit of a scope; the checks compile away unless auditing builds
   enable MESOSPHERE_AUDIT. */
class KScopedMemoryBlockManagerAuditor {
    private:
        KMemoryBlockManager *manager;
    public:
        explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager *m) : manager(m) { MESOSPHERE_AUDIT(this->manager->CheckState()); }
        /* Reference overload delegates to the pointer constructor. */
        explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager &m) : KScopedMemoryBlockManagerAuditor(std::addressof(m)) { /* ... */ }
        ALWAYS_INLINE ~KScopedMemoryBlockManagerAuditor() { MESOSPHERE_AUDIT(this->manager->CheckState()); }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,700 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/init/kern_init_page_table_select.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Kernel address-space layout constants. The kernel lives at the top of a 39-bit
   virtual address space; physical addresses span 48 bits. */
constexpr size_t KernelAslrAlignment = 2_MB; /* ASLR slides in 2 MB steps. */
constexpr size_t KernelVirtualAddressSpaceWidth  = size_t(1ul) << 39ul;
constexpr size_t KernelPhysicalAddressSpaceWidth = size_t(1ul) << 48ul;

/* 0 - width wraps (unsigned) to the top of the 64-bit space: 0xFFFFFF8000000000. */
constexpr size_t KernelVirtualAddressSpaceBase  = 0ul - KernelVirtualAddressSpaceWidth;
/* The final ASLR-alignment-sized chunk is excluded from the usable range. */
constexpr size_t KernelVirtualAddressSpaceEnd   = KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
constexpr size_t KernelVirtualAddressSpaceLast  = KernelVirtualAddressSpaceEnd - 1ul;
constexpr size_t KernelVirtualAddressSpaceSize  = KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;

constexpr size_t KernelPhysicalAddressSpaceBase = 0ul;
constexpr size_t KernelPhysicalAddressSpaceEnd  = KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth;
constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul;
constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
|
||||
|
||||
/* Types for the kernel's physical/virtual memory region trees. The top byte holds
   attribute flags; the remaining bits encode a hierarchical type id where a derived
   type contains all bits of its parent (checked via value == (value | parent)). */
enum KMemoryRegionType : u32 {
    /* Attribute flags (high bits). */
    KMemoryRegionAttr_CarveoutProtected = 0x04000000,
    KMemoryRegionAttr_DidKernelMap      = 0x08000000,
    KMemoryRegionAttr_ShouldKernelMap   = 0x10000000,
    KMemoryRegionAttr_UserReadOnly      = 0x20000000,
    KMemoryRegionAttr_NoUserMap         = 0x40000000,
    KMemoryRegionAttr_LinearMapped      = 0x80000000,

    /* Base types. */
    KMemoryRegionType_None              = 0,
    KMemoryRegionType_Kernel            = 1,
    KMemoryRegionType_Dram              = 2,
    KMemoryRegionType_CoreLocal         = 4,

    /* Virtual-tree region types. */
    KMemoryRegionType_VirtualKernelPtHeap       = 0x2A,
    KMemoryRegionType_VirtualKernelTraceBuffer  = 0x4A,
    KMemoryRegionType_VirtualKernelInitPt       = 0x19A,

    KMemoryRegionType_VirtualDramMetadataPool         = 0x29A,
    KMemoryRegionType_VirtualDramManagedPool          = 0x31A,
    KMemoryRegionType_VirtualDramApplicationPool      = 0x271A,
    KMemoryRegionType_VirtualDramAppletPool           = 0x1B1A,
    KMemoryRegionType_VirtualDramSystemPool           = 0x2B1A,
    KMemoryRegionType_VirtualDramSystemNonSecurePool  = 0x331A,

    /* Memory-mapped devices. */
    KMemoryRegionType_Uart                   = 0x1D,
    KMemoryRegionType_InterruptDistributor   = 0x4D | KMemoryRegionAttr_NoUserMap,
    KMemoryRegionType_InterruptCpuInterface  = 0x2D | KMemoryRegionAttr_NoUserMap,

    KMemoryRegionType_MemoryController          = 0x55,
    KMemoryRegionType_MemoryController0         = 0x95,
    KMemoryRegionType_MemoryController1         = 0x65,
    KMemoryRegionType_PowerManagementController = 0x1A5,

    KMemoryRegionType_KernelAutoMap = KMemoryRegionType_Kernel | KMemoryRegionAttr_ShouldKernelMap,

    KMemoryRegionType_KernelTemp  = 0x31,

    /* Kernel-internal virtual regions. */
    KMemoryRegionType_KernelCode  = 0x19,
    KMemoryRegionType_KernelStack = 0x29,
    KMemoryRegionType_KernelMisc  = 0x49,
    KMemoryRegionType_KernelSlab  = 0x89,

    KMemoryRegionType_KernelMiscMainStack      = 0xB49,
    KMemoryRegionType_KernelMiscMappedDevice   = 0xD49,
    KMemoryRegionType_KernelMiscIdleStack      = 0x1349,
    KMemoryRegionType_KernelMiscUnknownDebug   = 0x1549,
    KMemoryRegionType_KernelMiscExceptionStack = 0x2349,

    KMemoryRegionType_DramLinearMapped  = KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped,

    /* DRAM carveouts/pools. */
    KMemoryRegionType_DramReservedEarly = 0x16  | KMemoryRegionAttr_NoUserMap,
    KMemoryRegionType_DramPoolPartition = 0x26  | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped,
    KMemoryRegionType_DramMetadataPool  = 0x166 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_CarveoutProtected,

    KMemoryRegionType_DramNonKernel     = 0x1A6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped,

    KMemoryRegionType_DramApplicationPool     = 0x7A6  | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped,
    KMemoryRegionType_DramAppletPool          = 0xBA6  | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped,
    KMemoryRegionType_DramSystemNonSecurePool = 0xDA6  | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped,
    KMemoryRegionType_DramSystemPool          = 0x13A6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_CarveoutProtected,



    /* Kernel's own DRAM carveout. */
    KMemoryRegionType_DramKernel        = 0xE   | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
    KMemoryRegionType_DramKernelCode    = 0xCE  | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
    KMemoryRegionType_DramKernelSlab    = 0x14E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
    KMemoryRegionType_DramKernelPtHeap  = 0x24E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped,
    KMemoryRegionType_DramKernelInitPt  = 0x44E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped,

    /* These regions aren't normally mapped in retail kernel. */
    KMemoryRegionType_KernelTraceBuffer = 0xA6  | KMemoryRegionAttr_UserReadOnly | KMemoryRegionAttr_LinearMapped,
    KMemoryRegionType_OnMemoryBootImage = 0x156,
    KMemoryRegionType_DTB               = 0x256,
};
|
||||
|
||||
/* Map a physical region type to the type its linear-mapped virtual counterpart
   should use. Type derivation is encoded as bit containment, so a region "is a"
   KernelTraceBuffer / DramKernelPtHeap iff it carries all of that type's bits. */
constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
    if ((type_id & KMemoryRegionType_KernelTraceBuffer) == KMemoryRegionType_KernelTraceBuffer) {
        return KMemoryRegionType_VirtualKernelTraceBuffer;
    }
    if ((type_id & KMemoryRegionType_DramKernelPtHeap) == KMemoryRegionType_DramKernelPtHeap) {
        return KMemoryRegionType_VirtualKernelPtHeap;
    }
    /* Anything else linear-mapped is plain DRAM. */
    return KMemoryRegionType_Dram;
}
|
||||
|
||||
class KMemoryRegion : public util::IntrusiveRedBlackTreeBaseNode<KMemoryRegion> {
|
||||
NON_COPYABLE(KMemoryRegion);
|
||||
NON_MOVEABLE(KMemoryRegion);
|
||||
private:
|
||||
uintptr_t address;
|
||||
uintptr_t pair_address;
|
||||
size_t region_size;
|
||||
u32 attributes;
|
||||
u32 type_id;
|
||||
public:
|
||||
static constexpr ALWAYS_INLINE int Compare(const KMemoryRegion &lhs, const KMemoryRegion &rhs) {
|
||||
if (lhs.GetAddress() < rhs.GetAddress()) {
|
||||
return -1;
|
||||
} else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KMemoryRegion() : address(0), pair_address(0), region_size(0), attributes(0), type_id(0) { /* ... */ }
|
||||
constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t rs, uintptr_t p, u32 r, u32 t) :
|
||||
address(a), pair_address(p), region_size(rs), attributes(r), type_id(t)
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t rs, u32 r, u32 t) : KMemoryRegion(a, rs, std::numeric_limits<uintptr_t>::max(), r, t) { /* ... */ }
|
||||
|
||||
constexpr ALWAYS_INLINE uintptr_t GetAddress() const {
|
||||
return this->address;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const {
|
||||
return this->pair_address;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetSize() const {
|
||||
return this->region_size;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const {
|
||||
return this->GetAddress() + this->GetSize();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const {
|
||||
return this->GetEndAddress() - 1;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u32 GetAttributes() const {
|
||||
return this->attributes;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u32 GetType() const {
|
||||
return this->type_id;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetType(u32 type) {
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type));
|
||||
this->type_id = type;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const {
|
||||
return this->GetAddress() <= address && address <= this->GetLastAddress();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const {
|
||||
return (this->GetType() | type) == this->GetType();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool HasTypeAttribute(KMemoryRegionType attr) const {
|
||||
return (this->GetType() | attr) == this->GetType();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool CanDerive(u32 type) const {
|
||||
return (this->GetType() | type) == type;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) {
|
||||
this->pair_address = a;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionType attr) {
|
||||
this->type_id |= attr;
|
||||
}
|
||||
};
|
||||
static_assert(std::is_trivially_destructible<KMemoryRegion>::value);
|
||||
|
||||
/* Address-ordered intrusive red-black tree of KMemoryRegion entries, used to */
/* build and query the kernel's memory maps during initialization. */
class KMemoryRegionTree {
    public:
        /* First/last regions spanning everything derived from some type id. */
        /* Note: GetSize() covers the whole span, including any interior gaps. */
        struct DerivedRegionExtents {
            const KMemoryRegion *first_region;
            const KMemoryRegion *last_region;

            constexpr DerivedRegionExtents() : first_region(nullptr), last_region(nullptr) { /* ... */ }

            constexpr ALWAYS_INLINE uintptr_t GetAddress() const {
                return this->first_region->GetAddress();
            }

            constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const {
                return this->last_region->GetEndAddress();
            }

            constexpr ALWAYS_INLINE size_t GetSize() const {
                return this->GetEndAddress() - this->GetAddress();
            }

            constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const {
                return this->GetEndAddress() - 1;
            }
        };
    private:
        using TreeType = util::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
    public:
        /* Container-style member types, forwarded from the underlying intrusive tree. */
        using value_type = TreeType::value_type;
        using size_type = TreeType::size_type;
        using difference_type = TreeType::difference_type;
        using pointer = TreeType::pointer;
        using const_pointer = TreeType::const_pointer;
        using reference = TreeType::reference;
        using const_reference = TreeType::const_reference;
        using iterator = TreeType::iterator;
        using const_iterator = TreeType::const_iterator;
    private:
        TreeType tree;
    public:
        constexpr ALWAYS_INLINE KMemoryRegionTree() : tree() { /* ... */ }
    public:
        /* KMemoryRegion::Compare treats overlapping regions as equal, so searching */
        /* with a one-byte probe region finds the containing region, if any. */
        iterator FindContainingRegion(uintptr_t address) {
            return this->find(KMemoryRegion(address, 1, 0, 0));
        }

        /* Linear scan for an exact (type, attribute) match; init-aborts if none exists. */
        iterator FindFirstRegionByTypeAttr(u32 type_id, u32 attr = 0) {
            for (auto it = this->begin(); it != this->end(); it++) {
                if (it->GetType() == type_id && it->GetAttributes() == attr) {
                    return it;
                }
            }
            MESOSPHERE_INIT_ABORT();
        }

        /* Linear scan for an exact type match; init-aborts if none exists. */
        iterator FindFirstRegionByType(u32 type_id) {
            for (auto it = this->begin(); it != this->end(); it++) {
                if (it->GetType() == type_id) {
                    return it;
                }
            }
            MESOSPHERE_INIT_ABORT();
        }

        /* Linear scan for the first region whose type contains all bits of type_id; */
        /* init-aborts if none exists. */
        iterator FindFirstDerivedRegion(u32 type_id) {
            for (auto it = this->begin(); it != this->end(); it++) {
                if (it->IsDerivedFrom(type_id)) {
                    return it;
                }
            }
            MESOSPHERE_INIT_ABORT();
        }

        /* Computes the first and last regions derived from type_id. */
        /* Init-aborts if the tree contains no such region. */
        DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const {
            DerivedRegionExtents extents;

            MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region == nullptr);
            MESOSPHERE_INIT_ABORT_UNLESS(extents.last_region == nullptr);

            for (auto it = this->cbegin(); it != this->cend(); it++) {
                if (it->IsDerivedFrom(type_id)) {
                    if (extents.first_region == nullptr) {
                        extents.first_region = std::addressof(*it);
                    }
                    extents.last_region = std::addressof(*it);
                }
            }

            MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region != nullptr);
            MESOSPHERE_INIT_ABORT_UNLESS(extents.last_region != nullptr);

            return extents;
        }
    public:
        /* Inserts [address, address + size) with the given type/attributes. */
        /* Declared here, defined out-of-line; presumably splits overlapping */
        /* regions as needed — confirm at the definition. */
        NOINLINE bool Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
        /* Selects a random, suitably-aligned range within regions of the given type. */
        NOINLINE KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);

        /* As above, but reserves guard_size bytes of extra space on each side and */
        /* returns the address just past the leading guard. */
        ALWAYS_INLINE KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, size_t guard_size) {
            return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
        }
    public:
        /* Iterator accessors. */
        iterator begin() {
            return this->tree.begin();
        }

        const_iterator begin() const {
            return this->tree.begin();
        }

        iterator end() {
            return this->tree.end();
        }

        const_iterator end() const {
            return this->tree.end();
        }

        const_iterator cbegin() const {
            return this->begin();
        }

        const_iterator cend() const {
            return this->end();
        }

        iterator iterator_to(reference ref) {
            return this->tree.iterator_to(ref);
        }

        const_iterator iterator_to(const_reference ref) const {
            return this->tree.iterator_to(ref);
        }

        /* Content management. */
        bool empty() const {
            return this->tree.empty();
        }

        reference back() {
            return this->tree.back();
        }

        const_reference back() const {
            return this->tree.back();
        }

        reference front() {
            return this->tree.front();
        }

        const_reference front() const {
            return this->tree.front();
        }

        /* GCC over-eagerly inlines this operation. */
        NOINLINE iterator insert(reference ref) {
            return this->tree.insert(ref);
        }

        NOINLINE iterator erase(iterator it) {
            return this->tree.erase(it);
        }

        /* NOTE(review): const member functions returning non-const iterators mirror */
        /* the underlying intrusive tree's API. */
        iterator find(const_reference ref) const {
            return this->tree.find(ref);
        }

        iterator nfind(const_reference ref) const {
            return this->tree.nfind(ref);
        }
};
|
||||
|
||||
class KMemoryRegionAllocator {
|
||||
NON_COPYABLE(KMemoryRegionAllocator);
|
||||
NON_MOVEABLE(KMemoryRegionAllocator);
|
||||
public:
|
||||
static constexpr size_t MaxMemoryRegions = 1000;
|
||||
friend class KMemoryLayout;
|
||||
private:
|
||||
KMemoryRegion region_heap[MaxMemoryRegions];
|
||||
size_t num_regions;
|
||||
private:
|
||||
constexpr ALWAYS_INLINE KMemoryRegionAllocator() : region_heap(), num_regions() { /* ... */ }
|
||||
public:
|
||||
ALWAYS_INLINE KMemoryRegion *Allocate() {
|
||||
/* Ensure we stay within the bounds of our heap. */
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(this->num_regions < MaxMemoryRegions);
|
||||
|
||||
return &this->region_heap[this->num_regions++];
|
||||
}
|
||||
|
||||
template<typename... Args>
|
||||
ALWAYS_INLINE KMemoryRegion *Create(Args&&... args) {
|
||||
KMemoryRegion *region = this->Allocate();
|
||||
new (region) KMemoryRegion(std::forward<Args>(args)...);
|
||||
return region;
|
||||
}
|
||||
};
|
||||
|
||||
/* Static registry of the kernel's memory map: region trees for the virtual and */
/* physical address spaces (plus their linear-mapped counterparts), the allocator */
/* that backs their nodes, and the offsets translating linear physical <-> virtual */
/* addresses. Populated during kernel initialization. */
class KMemoryLayout {
    private:
        /* Offsets such that linear_virt = phys + phys_to_virt_diff (and inverse). */
        static /* constinit */ inline uintptr_t s_linear_phys_to_virt_diff;
        static /* constinit */ inline uintptr_t s_linear_virt_to_phys_diff;
        static /* constinit */ inline KMemoryRegionAllocator s_region_allocator;
        static /* constinit */ inline KMemoryRegionTree s_virtual_tree;
        static /* constinit */ inline KMemoryRegionTree s_physical_tree;
        static /* constinit */ inline KMemoryRegionTree s_virtual_linear_tree;
        static /* constinit */ inline KMemoryRegionTree s_physical_linear_tree;
    private:
        /* Builds a pairless region describing the virtual linear mapping of the */
        /* given physical extents. */
        static ALWAYS_INLINE auto GetVirtualLinearExtents(const KMemoryRegionTree::DerivedRegionExtents physical) {
            return KMemoryRegion(GetInteger(GetLinearVirtualAddress(physical.GetAddress())), physical.GetSize(), 0, KMemoryRegionType_None);
        }
    public:
        /* Accessors for the global trees/allocator. */
        static ALWAYS_INLINE KMemoryRegionAllocator &GetMemoryRegionAllocator() { return s_region_allocator; }
        static ALWAYS_INLINE KMemoryRegionTree &GetVirtualMemoryRegionTree() { return s_virtual_tree; }
        static ALWAYS_INLINE KMemoryRegionTree &GetPhysicalMemoryRegionTree() { return s_physical_tree; }
        static ALWAYS_INLINE KMemoryRegionTree &GetVirtualLinearMemoryRegionTree() { return s_virtual_linear_tree; }
        static ALWAYS_INLINE KMemoryRegionTree &GetPhysicalLinearMemoryRegionTree() { return s_physical_linear_tree; }

        /* NOTE(review): the virtual overload returns the *linear* virtual tree's end */
        /* while the physical overload returns the plain physical tree's end — confirm */
        /* this asymmetry is intended by the call sites. */
        static ALWAYS_INLINE KMemoryRegionTree::iterator GetEnd(KVirtualAddress) {
            return GetVirtualLinearMemoryRegionTree().end();
        }

        static ALWAYS_INLINE KMemoryRegionTree::iterator GetEnd(KPhysicalAddress) {
            return GetPhysicalMemoryRegionTree().end();
        }

        static NOINLINE KMemoryRegionTree::iterator FindContainingRegion(KVirtualAddress address) {
            return GetVirtualMemoryRegionTree().FindContainingRegion(GetInteger(address));
        }

        static NOINLINE KMemoryRegionTree::iterator FindContainingRegion(KPhysicalAddress address) {
            return GetPhysicalMemoryRegionTree().FindContainingRegion(GetInteger(address));
        }

        /* Linear-map address translation; valid only for linearly-mapped addresses. */
        static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) {
            return GetInteger(address) + s_linear_phys_to_virt_diff;
        }

        static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) {
            return GetInteger(address) + s_linear_virt_to_phys_diff;
        }

        /* Per-core stack tops; the core id is matched against the region's attribute field. */
        static NOINLINE KVirtualAddress GetMainStackTopAddress(s32 core_id) {
            return GetVirtualMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_KernelMiscMainStack, static_cast<u32>(core_id))->GetEndAddress();
        }

        static NOINLINE KVirtualAddress GetIdleStackTopAddress(s32 core_id) {
            return GetVirtualMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_KernelMiscIdleStack, static_cast<u32>(core_id))->GetEndAddress();
        }

        static NOINLINE KVirtualAddress GetExceptionStackTopAddress(s32 core_id) {
            return GetVirtualMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_KernelMiscExceptionStack, static_cast<u32>(core_id))->GetEndAddress();
        }

        static NOINLINE KVirtualAddress GetSlabRegionAddress() {
            return GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelSlab)->GetAddress();
        }

        static NOINLINE KVirtualAddress GetCoreLocalRegionAddress() {
            return GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_CoreLocal)->GetAddress();
        }

        /* Device regions: the pair address of a physical device region is its */
        /* virtual mapping. */
        static NOINLINE KVirtualAddress GetInterruptDistributorAddress() {
            return GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_InterruptDistributor)->GetPairAddress();
        }

        static NOINLINE KVirtualAddress GetInterruptCpuInterfaceAddress() {
            return GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_InterruptCpuInterface)->GetPairAddress();
        }

        static NOINLINE KVirtualAddress GetUartAddress() {
            return GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_Uart)->GetPairAddress();
        }

        static NOINLINE KMemoryRegion &GetMemoryControllerRegion() {
            return *GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_MemoryController);
        }

        static NOINLINE KMemoryRegion &GetMetadataPoolRegion() {
            return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualDramMetadataPool);
        }

        static NOINLINE KMemoryRegion &GetPageTableHeapRegion() {
            return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualKernelPtHeap);
        }

        static NOINLINE KMemoryRegion &GetKernelStackRegion() {
            return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelStack);
        }

        static NOINLINE KMemoryRegion &GetTempRegion() {
            return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelTemp);
        }

        static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) {
            return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address));
        }

        /* Tests whether address lies in non-kernel DRAM (the managed heap); on success */
        /* optionally returns the containing region. The hint region, when given and */
        /* still containing the address, avoids a fresh tree lookup. */
        static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) {
            auto &tree = GetPhysicalLinearMemoryRegionTree();
            KMemoryRegionTree::const_iterator it = tree.end();
            if (hint != nullptr) {
                it = tree.iterator_to(*hint);
            }
            if (it == tree.end() || !it->Contains(GetInteger(address))) {
                it = tree.FindContainingRegion(GetInteger(address));
            }
            if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
                if (out) {
                    *out = std::addressof(*it);
                }
                return true;
            }
            return false;
        }

        /* As above, but requires [address, address + size) to be covered entirely by */
        /* consecutive heap regions. *out receives the region containing the range's end. */
        static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, size_t size, const KMemoryRegion *hint = nullptr) {
            auto &tree = GetPhysicalLinearMemoryRegionTree();
            KMemoryRegionTree::const_iterator it = tree.end();
            if (hint != nullptr) {
                it = tree.iterator_to(*hint);
            }
            if (it == tree.end() || !it->Contains(GetInteger(address))) {
                it = tree.FindContainingRegion(GetInteger(address));
            }
            if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
                const uintptr_t last_address = GetInteger(address) + size - 1;
                /* Walk forward until the range's last byte is within a heap region, */
                /* aborting the walk if we fall off the end of heap-derived regions. */
                do {
                    if (last_address <= it->GetLastAddress()) {
                        if (out) {
                            *out = std::addressof(*it);
                        }
                        return true;
                    }
                    it++;
                } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel));
            }
            return false;
        }

        /* Virtual-space analogues of the two checks above, against the managed pool. */
        static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, const KMemoryRegion *hint = nullptr) {
            auto &tree = GetVirtualLinearMemoryRegionTree();
            KMemoryRegionTree::const_iterator it = tree.end();
            if (hint != nullptr) {
                it = tree.iterator_to(*hint);
            }
            if (it == tree.end() || !it->Contains(GetInteger(address))) {
                it = tree.FindContainingRegion(GetInteger(address));
            }
            if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
                if (out) {
                    *out = std::addressof(*it);
                }
                return true;
            }
            return false;
        }

        static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, size_t size, const KMemoryRegion *hint = nullptr) {
            auto &tree = GetVirtualLinearMemoryRegionTree();
            KMemoryRegionTree::const_iterator it = tree.end();
            if (hint != nullptr) {
                it = tree.iterator_to(*hint);
            }
            if (it == tree.end() || !it->Contains(GetInteger(address))) {
                it = tree.FindContainingRegion(GetInteger(address));
            }
            if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
                const uintptr_t last_address = GetInteger(address) + size - 1;
                do {
                    if (last_address <= it->GetLastAddress()) {
                        if (out) {
                            *out = std::addressof(*it);
                        }
                        return true;
                    }
                    it++;
                } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool));
            }
            return false;
        }

        /* Sums all DRAM region sizes, and separately those not derived from */
        /* DramNonKernel (i.e. kernel-owned DRAM). Returns (total, kernel). */
        static NOINLINE std::tuple<size_t, size_t> GetTotalAndKernelMemorySizes() {
            size_t total_size = 0, kernel_size = 0;
            for (auto it = GetPhysicalMemoryRegionTree().cbegin(); it != GetPhysicalMemoryRegionTree().cend(); it++) {
                if (it->IsDerivedFrom(KMemoryRegionType_Dram)) {
                    total_size += it->GetSize();
                    if (!it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
                        kernel_size += it->GetSize();
                    }
                }
            }
            return std::make_tuple(total_size, kernel_size);
        }

        /* Populates the linear trees and translation offsets; defined out-of-line. */
        static void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);

        /* Derived-extent helpers over the virtual tree. */
        static NOINLINE auto GetKernelRegionExtents() {
            return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
        }

        static NOINLINE auto GetKernelCodeRegionExtents() {
            return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode);
        }

        static NOINLINE auto GetKernelStackRegionExtents() {
            return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack);
        }

        static NOINLINE auto GetKernelMiscRegionExtents() {
            return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc);
        }

        static NOINLINE auto GetKernelSlabRegionExtents() {
            return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab);
        }

        static NOINLINE const KMemoryRegion &GetCoreLocalRegion() {
            return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_CoreLocal);
        }

        /* Extents of all linearly-mapped physical memory, and their virtual image. */
        static NOINLINE auto GetLinearRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_LinearMapped);
        }

        static NOINLINE auto GetLinearRegionExtents() {
            return GetVirtualLinearExtents(GetLinearRegionPhysicalExtents());
        }

        static NOINLINE auto GetCarveoutRegionExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_CarveoutProtected);
        }

        /* Derived-extent helpers over the physical tree (DRAM carve-up). */
        static NOINLINE auto GetKernelRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernel);
        }

        static NOINLINE auto GetKernelCodeRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelCode);
        }

        static NOINLINE auto GetKernelSlabRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSlab);
        }

        static NOINLINE auto GetKernelPageTableHeapRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap);
        }

        static NOINLINE auto GetKernelInitPageTableRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt);
        }

        static NOINLINE auto GetKernelPoolPartitionRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolPartition);
        }

        static NOINLINE auto GetKernelMetadataPoolRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramMetadataPool);
        }

        static NOINLINE auto GetKernelSystemPoolRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemPool);
        }

        static NOINLINE auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemNonSecurePool);
        }

        static NOINLINE auto GetKernelAppletPoolRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramAppletPool);
        }

        static NOINLINE auto GetKernelApplicationPoolRegionPhysicalExtents() {
            return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramApplicationPool);
        }
};
|
||||
|
||||
|
||||
/* Memory-map construction entry points invoked during kernel initialization. */
namespace init {

    /* These should be generic, regardless of board. */
    void SetupCoreLocalRegionMemoryRegions(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator);
    void SetupPoolPartitionMemoryRegions();

    /* These may be implemented in a board-specific manner. */
    void SetupDevicePhysicalMemoryRegions();
    void SetupDramPhysicalMemoryRegions();

}
|
||||
|
||||
}
|
|
@ -0,0 +1,207 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_light_lock.hpp>
|
||||
#include <mesosphere/kern_k_memory_layout.hpp>
|
||||
#include <mesosphere/kern_k_page_heap.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KPageGroup;
|
||||
|
||||
/* Physical page allocator. DRAM is partitioned into pools (application, applet, */
/* system, system-non-secure); each pool is served by one or more Impl managers, */
/* each wrapping a KPageHeap plus per-page reference counts. */
class KMemoryManager {
    public:
        enum Pool {
            Pool_Application = 0,
            Pool_Applet = 1,
            Pool_System = 2,
            Pool_SystemNonSecure = 3,

            Pool_Count,

            /* Field placement of the pool within an allocation "option" word. */
            Pool_Shift = 4,
            Pool_Mask = (0xF << Pool_Shift),
        };

        /* Whether allocation proceeds from the front or back of a pool's managers. */
        enum Direction {
            Direction_FromFront = 0,
            Direction_FromBack = 1,

            Direction_Shift = 0,
            Direction_Mask = (0xF << Direction_Shift),
        };

        static constexpr size_t MaxManagerCount = 10;
    private:
        /* One manager: a page heap over a contiguous range, with a refcount per page. */
        class Impl {
            private:
                using RefCount = u16;
            private:
                KPageHeap heap;
                RefCount *page_reference_counts;
                KVirtualAddress metadata_region;
                Pool pool;
                /* Intrusive links in the per-pool manager list. */
                Impl *next;
                Impl *prev;
            public:
                constexpr Impl() : heap(), page_reference_counts(), metadata_region(), pool(), next(), prev() { /* ... */ }

                /* Sets up the heap/refcounts over region; returns metadata bytes consumed. */
                size_t Initialize(const KMemoryRegion *region, Pool pool, KVirtualAddress metadata_region, KVirtualAddress metadata_region_end);

                KVirtualAddress AllocateBlock(s32 index) { return this->heap.AllocateBlock(index); }
                void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); }

                void TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages);

                constexpr size_t GetSize() const { return this->heap.GetSize(); }
                constexpr KVirtualAddress GetEndAddress() const { return this->heap.GetEndAddress(); }

                constexpr void SetNext(Impl *n) { this->next = n; }
                constexpr void SetPrev(Impl *n) { this->prev = n; }
                constexpr Impl *GetNext() const { return this->next; }
                constexpr Impl *GetPrev() const { return this->prev; }

                /* Increments the refcount of each page in [address, address + num_pages */
                /* pages), under the owning pool's lock. Aborts on u16 wraparound. */
                void Open(KLightLock *pool_locks, KVirtualAddress address, size_t num_pages) {
                    KScopedLightLock lk(pool_locks[this->pool]);

                    size_t index = this->heap.GetPageOffset(address);
                    const size_t end = index + num_pages;
                    while (index < end) {
                        const RefCount ref_count = (++this->page_reference_counts[index]);
                        MESOSPHERE_ABORT_UNLESS(ref_count > 0);

                        index++;
                    }
                }

                /* Decrements refcounts over the range; pages reaching zero are returned */
                /* to the heap. Aborts if any page was already at zero. */
                void Close(KLightLock *pool_locks, KVirtualAddress address, size_t num_pages) {
                    KScopedLightLock lk(pool_locks[this->pool]);

                    size_t index = this->heap.GetPageOffset(address);
                    const size_t end = index + num_pages;

                    size_t free_start = 0;
                    size_t free_count = 0;
                    while (index < end) {
                        MESOSPHERE_ABORT_UNLESS(this->page_reference_counts[index] > 0);
                        const RefCount ref_count = (--this->page_reference_counts[index]);

                        /* Keep track of how many zero refcounts we see in a row, to minimize calls to free. */
                        if (ref_count == 0) {
                            if (free_count > 0) {
                                free_count++;
                            } else {
                                free_start = index;
                                free_count = 1;
                            }
                        } else {
                            if (free_count > 0) {
                                this->Free(this->heap.GetAddress() + free_start * PageSize, free_count);
                                free_count = 0;
                            }
                        }

                        index++;
                    }

                    /* Flush any trailing run of newly-zero pages. */
                    if (free_count > 0) {
                        this->Free(this->heap.GetAddress() + free_start * PageSize, free_count);
                    }
                }
            public:
                static size_t CalculateMetadataOverheadSize(size_t region_size);
        };
    private:
        KLightLock pool_locks[Pool_Count];
        /* Head/tail of the doubly-linked manager list for each pool. */
        Impl *pool_managers_head[Pool_Count];
        Impl *pool_managers_tail[Pool_Count];
        Impl managers[MaxManagerCount];
        size_t num_managers;
        u64 optimized_process_ids[Pool_Count];
        bool has_optimized_process[Pool_Count];
    private:
        /* Looks up the manager responsible for a linearly-mapped virtual address. */
        /* NOTE(review): indexes managers[] by the containing region's attribute field — */
        /* presumably Initialize stores each manager's index there; confirm at definition. */
        Impl &GetManager(KVirtualAddress address) {
            return this->managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
        }

        constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
            return dir == Direction_FromBack ? this->pool_managers_tail[pool] : this->pool_managers_head[pool];
        }

        constexpr Impl *GetNextManager(Impl *cur, Direction dir) {
            if (dir == Direction_FromBack) {
                return cur->GetPrev();
            } else {
                return cur->GetNext();
            }
        }
    public:
        constexpr KMemoryManager()
            : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process()
        {
            /* ... */
        }

        NOINLINE void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);

        NOINLINE KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);
        NOINLINE Result Allocate(KPageGroup *out, size_t num_pages, u32 option);

        /* Opens references page-by-page, splitting the range across managers as the */
        /* address crosses manager boundaries. */
        void Open(KVirtualAddress address, size_t num_pages) {
            /* Repeatedly open references until we've done so for all pages. */
            while (num_pages) {
                auto &manager = this->GetManager(address);
                const size_t cur_pages = std::min(num_pages, (manager.GetEndAddress() - address) / PageSize);
                manager.Open(this->pool_locks, address, cur_pages);
                num_pages -= cur_pages;
                address += cur_pages * PageSize;
            }
        }

        /* Closes references page-by-page, splitting across managers like Open. */
        void Close(KVirtualAddress address, size_t num_pages) {
            /* Repeatedly close references until we've done so for all pages. */
            while (num_pages) {
                auto &manager = this->GetManager(address);
                const size_t cur_pages = std::min(num_pages, (manager.GetEndAddress() - address) / PageSize);
                manager.Close(this->pool_locks, address, cur_pages);
                num_pages -= cur_pages;
                address += cur_pages * PageSize;
            }
        }
    public:
        static size_t CalculateMetadataOverheadSize(size_t region_size) {
            return Impl::CalculateMetadataOverheadSize(region_size);
        }

        /* Packs (pool, direction) into the option word consumed by Allocate*. */
        static constexpr ALWAYS_INLINE u32 EncodeOption(Pool pool, Direction dir) {
            return (pool << Pool_Shift) | (dir << Direction_Shift);
        }

        static constexpr ALWAYS_INLINE Pool GetPool(u32 option) {
            return static_cast<Pool>((option & Pool_Mask) >> Pool_Shift);
        }

        static constexpr ALWAYS_INLINE Direction GetDirection(u32 option) {
            return static_cast<Direction>((option & Direction_Mask) >> Direction_Shift);
        }

        static constexpr ALWAYS_INLINE std::tuple<Pool, Direction> DecodeOption(u32 option) {
            return std::make_tuple(GetPool(option), GetDirection(option));
        }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_k_light_lock.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Slab-allocated entry linking a kernel object to a global name. */
/* Currently only the skeleton exists; the registry API is not yet implemented. */
class KObjectName : public KSlabAllocated<KObjectName>, public util::IntrusiveListBaseNode<KObjectName> {
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_memory_layout.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KPageBuffer : public KSlabAllocated<KPageBuffer> {
|
||||
private:
|
||||
alignas(PageSize) u8 buffer[PageSize];
|
||||
public:
|
||||
KPageBuffer() {
|
||||
std::memset(buffer, 0, sizeof(buffer));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const {
|
||||
return KMemoryLayout::GetLinearPhysicalAddress(KVirtualAddress(this));
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE KPageBuffer *FromPhysicalAddress(KPhysicalAddress phys_addr) {
|
||||
const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(phys_addr);
|
||||
|
||||
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
|
||||
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
|
||||
|
||||
return GetPointer<KPageBuffer>(virt_addr);
|
||||
}
|
||||
};
|
||||
static_assert(sizeof(KPageBuffer) == PageSize);
|
||||
static_assert(alignof(KPageBuffer) == PageSize);
|
||||
|
||||
}
|
125
libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp
Normal file
125
libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp
Normal file
|
@ -0,0 +1,125 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KBlockInfoManager;
|
||||
|
||||
/* One contiguous run of virtual pages; the node type stored in a KPageGroup's list. */
class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> {
    private:
        KVirtualAddress address; /* Start of the run. */
        size_t num_pages;        /* Length of the run, in pages. */
    public:
        constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), address(), num_pages() { /* ... */ }

        constexpr void Initialize(KVirtualAddress addr, size_t np) {
            address   = addr;
            num_pages = np;
        }

        constexpr KVirtualAddress GetAddress() const { return address; }
        constexpr size_t GetNumPages() const { return num_pages; }
        constexpr size_t GetSize() const { return num_pages * PageSize; }
        constexpr KVirtualAddress GetEndAddress() const { return address + num_pages * PageSize; }
        constexpr KVirtualAddress GetLastAddress() const { return GetEndAddress() - 1; }

        constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
            return address == rhs.address && num_pages == rhs.num_pages;
        }

        constexpr bool operator==(const KBlockInfo &rhs) const { return IsEquivalentTo(rhs); }
        constexpr bool operator!=(const KBlockInfo &rhs) const { return !IsEquivalentTo(rhs); }

        /* True iff this run ends strictly before addr, with a guard for runs whose */
        /* end wraps around to the null address (nothing can lie after such a run). */
        constexpr bool IsStrictlyBefore(KVirtualAddress addr) const {
            const KVirtualAddress end = GetEndAddress();

            if (end == Null<KVirtualAddress> && address != Null<KVirtualAddress>) {
                return false;
            }

            return end < addr;
        }

        constexpr bool operator<(KVirtualAddress addr) const { return IsStrictlyBefore(addr); }

        /* Extends this run by np pages iff addr is non-null and exactly adjacent. */
        constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) {
            if (addr == Null<KVirtualAddress> || addr != GetEndAddress()) {
                return false;
            }

            num_pages += np;
            return true;
        }
};
|
||||
|
||||
class KPageGroup {
|
||||
public:
|
||||
using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType;
|
||||
using iterator = BlockInfoList::const_iterator;
|
||||
private:
|
||||
BlockInfoList block_list;
|
||||
KBlockInfoManager *manager;
|
||||
public:
|
||||
explicit KPageGroup(KBlockInfoManager *m) : block_list(), manager(m) { /* ... */ }
|
||||
~KPageGroup() { this->Finalize(); }
|
||||
|
||||
void Finalize();
|
||||
|
||||
iterator begin() const { return this->block_list.begin(); }
|
||||
iterator end() const { return this->block_list.end(); }
|
||||
bool empty() const { return this->block_list.empty(); }
|
||||
|
||||
Result AddBlock(KVirtualAddress addr, size_t num_pages);
|
||||
void Open() const;
|
||||
void Close() const;
|
||||
|
||||
size_t GetNumPages() const;
|
||||
|
||||
bool IsEquivalentTo(const KPageGroup &rhs) const;
|
||||
|
||||
bool operator==(const KPageGroup &rhs) const {
|
||||
return this->IsEquivalentTo(rhs);
|
||||
}
|
||||
|
||||
bool operator!=(const KPageGroup &rhs) const {
|
||||
return !(*this == rhs);
|
||||
}
|
||||
};
|
||||
|
||||
class KScopedPageGroup {
|
||||
private:
|
||||
const KPageGroup *group;
|
||||
public:
|
||||
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : group(gp) { if (this->group) { this->group->Open(); } }
|
||||
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
|
||||
ALWAYS_INLINE ~KScopedPageGroup() { if (this->group) { this->group->Close(); } }
|
||||
|
||||
ALWAYS_INLINE void CancelClose() {
|
||||
this->group = nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
326
libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
Normal file
326
libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
Normal file
|
@ -0,0 +1,326 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KPageHeap {
|
||||
private:
|
||||
static constexpr inline size_t MemoryBlockPageShifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E };
|
||||
static constexpr size_t NumMemoryBlockPageShifts = util::size(MemoryBlockPageShifts);
|
||||
public:
|
||||
static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
|
||||
const size_t target_pages = std::max(num_pages, align_pages);
|
||||
for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
|
||||
if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return static_cast<s32>(i);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr s32 GetBlockIndex(size_t num_pages) {
|
||||
for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
|
||||
if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr size_t GetBlockSize(size_t index) {
|
||||
return size_t(1) << MemoryBlockPageShifts[index];
|
||||
}
|
||||
|
||||
static constexpr size_t GetBlockNumPages(size_t index) {
|
||||
return GetBlockSize(index) / PageSize;
|
||||
}
|
||||
private:
|
||||
class Block {
|
||||
private:
|
||||
class Bitmap {
|
||||
public:
|
||||
static constexpr size_t MaxDepth = 4;
|
||||
private:
|
||||
u64 *bit_storages[MaxDepth];
|
||||
size_t num_bits;
|
||||
size_t used_depths;
|
||||
public:
|
||||
constexpr Bitmap() : bit_storages(), num_bits(), used_depths() { /* ... */ }
|
||||
|
||||
constexpr size_t GetNumBits() const { return this->num_bits; }
|
||||
constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(this->used_depths) - 1; }
|
||||
|
||||
u64 *Initialize(u64 *storage, size_t size) {
|
||||
/* Initially, everything is un-set. */
|
||||
this->num_bits = 0;
|
||||
|
||||
/* Calculate the needed bitmap depth. */
|
||||
this->used_depths = static_cast<size_t>(GetRequiredDepth(size));
|
||||
MESOSPHERE_ASSERT(this->used_depths <= MaxDepth);
|
||||
|
||||
/* Set the bitmap pointers. */
|
||||
for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
|
||||
this->bit_storages[depth] = storage;
|
||||
size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
|
||||
storage += size;
|
||||
}
|
||||
|
||||
return storage;
|
||||
}
|
||||
|
||||
ssize_t FindFreeBlock() const {
|
||||
uintptr_t offset = 0;
|
||||
s32 depth = 0;
|
||||
|
||||
do {
|
||||
const u64 v = this->bit_storages[depth][offset];
|
||||
if (v == 0) {
|
||||
/* If depth is bigger than zero, then a previous level indicated a block was free. */
|
||||
MESOSPHERE_ASSERT(depth == 0);
|
||||
return -1;
|
||||
}
|
||||
offset = offset * BITSIZEOF(u64) + __builtin_ctzll(v);
|
||||
++depth;
|
||||
} while (depth < static_cast<s32>(this->used_depths));
|
||||
|
||||
return static_cast<ssize_t>(offset);
|
||||
}
|
||||
|
||||
void SetBit(size_t offset) {
|
||||
this->SetBit(this->GetHighestDepthIndex(), offset);
|
||||
this->num_bits++;
|
||||
}
|
||||
|
||||
void ClearBit(size_t offset) {
|
||||
this->ClearBit(this->GetHighestDepthIndex(), offset);
|
||||
this->num_bits--;
|
||||
}
|
||||
|
||||
bool ClearRange(size_t offset, size_t count) {
|
||||
s32 depth = this->GetHighestDepthIndex();
|
||||
u64 *bits = this->bit_storages[depth];
|
||||
size_t bit_ind = offset / BITSIZEOF(u64);
|
||||
if (AMS_LIKELY(count < BITSIZEOF(u64))) {
|
||||
const size_t shift = offset % BITSIZEOF(u64);
|
||||
MESOSPHERE_ASSERT(shift + count <= BITSIZEOF(u64));
|
||||
/* Check that all the bits are set. */
|
||||
const u64 mask = ((u64(1) << count) - 1) << shift;
|
||||
u64 v = bits[bit_ind];
|
||||
if ((v & mask) != mask) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Clear the bits. */
|
||||
v &= ~mask;
|
||||
bits[bit_ind] = v;
|
||||
if (v == 0) {
|
||||
this->ClearBit(depth - 1, bit_ind);
|
||||
}
|
||||
} else {
|
||||
MESOSPHERE_ASSERT(offset % BITSIZEOF(u64) == 0);
|
||||
MESOSPHERE_ASSERT(count % BITSIZEOF(u64) == 0);
|
||||
/* Check that all the bits are set. */
|
||||
size_t remaining = count;
|
||||
size_t i = 0;
|
||||
do {
|
||||
if (bits[bit_ind + i++] != ~u64(0)) {
|
||||
return false;
|
||||
}
|
||||
remaining -= BITSIZEOF(u64);
|
||||
} while (remaining > 0);
|
||||
|
||||
/* Clear the bits. */
|
||||
remaining = count;
|
||||
i = 0;
|
||||
do {
|
||||
bits[bit_ind + i] = 0;
|
||||
this->ClearBit(depth - 1, bit_ind + i);
|
||||
i++;
|
||||
remaining -= BITSIZEOF(u64);
|
||||
} while (remaining > 0);
|
||||
}
|
||||
|
||||
this->num_bits -= count;
|
||||
return true;
|
||||
}
|
||||
private:
|
||||
void SetBit(s32 depth, size_t offset) {
|
||||
while (depth >= 0) {
|
||||
size_t ind = offset / BITSIZEOF(u64);
|
||||
size_t which = offset % BITSIZEOF(u64);
|
||||
const u64 mask = u64(1) << which;
|
||||
|
||||
u64 *bit = std::addressof(this->bit_storages[depth][ind]);
|
||||
u64 v = *bit;
|
||||
MESOSPHERE_ASSERT((v & mask) == 0);
|
||||
*bit = v | mask;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
void ClearBit(s32 depth, size_t offset) {
|
||||
while (depth >= 0) {
|
||||
size_t ind = offset / BITSIZEOF(u64);
|
||||
size_t which = offset % BITSIZEOF(u64);
|
||||
const u64 mask = u64(1) << which;
|
||||
|
||||
u64 *bit = std::addressof(this->bit_storages[depth][ind]);
|
||||
u64 v = *bit;
|
||||
MESOSPHERE_ASSERT((v & mask) != 0);
|
||||
v &= ~mask;
|
||||
*bit = v;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
private:
|
||||
static constexpr s32 GetRequiredDepth(size_t region_size) {
|
||||
s32 depth = 0;
|
||||
while (true) {
|
||||
region_size /= BITSIZEOF(u64);
|
||||
depth++;
|
||||
if (region_size == 0) {
|
||||
return depth;
|
||||
}
|
||||
}
|
||||
}
|
||||
public:
|
||||
static constexpr size_t CalculateMetadataOverheadSize(size_t region_size) {
|
||||
size_t overhead_bits = 0;
|
||||
for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
|
||||
region_size = util::AlignUp(region_size, BITSIZEOF(u64)) / BITSIZEOF(u64);
|
||||
overhead_bits += region_size;
|
||||
}
|
||||
return overhead_bits * sizeof(u64);
|
||||
}
|
||||
};
|
||||
private:
|
||||
Bitmap bitmap;
|
||||
KVirtualAddress heap_address;
|
||||
uintptr_t end_offset;
|
||||
size_t block_shift;
|
||||
size_t next_block_shift;
|
||||
public:
|
||||
constexpr Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ }
|
||||
|
||||
constexpr size_t GetShift() const { return this->block_shift; }
|
||||
constexpr size_t GetNextShift() const { return this->next_block_shift; }
|
||||
constexpr size_t GetSize() const { return u64(1) << this->GetShift(); }
|
||||
constexpr size_t GetNumPages() const { return this->GetSize() / PageSize; }
|
||||
constexpr size_t GetNumFreeBlocks() const { return this->bitmap.GetNumBits(); }
|
||||
constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); }
|
||||
|
||||
u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
|
||||
/* Set shifts. */
|
||||
this->block_shift = bs;
|
||||
this->next_block_shift = nbs;
|
||||
|
||||
/* Align up the address. */
|
||||
KVirtualAddress end = addr + size;
|
||||
const size_t align = (this->next_block_shift != 0) ? (u64(1) << this->next_block_shift) : (this->block_shift);
|
||||
addr = util::AlignDown(GetInteger(addr), align);
|
||||
end = util::AlignUp(GetInteger(end), align);
|
||||
|
||||
this->heap_address = addr;
|
||||
this->end_offset = (end - addr) / (u64(1) << this->block_shift);
|
||||
return this->bitmap.Initialize(bit_storage, this->end_offset);
|
||||
}
|
||||
|
||||
KVirtualAddress PushBlock(KVirtualAddress address) {
|
||||
/* Set the bit for the free block. */
|
||||
size_t offset = (address - this->heap_address) >> this->GetShift();
|
||||
this->bitmap.SetBit(offset);
|
||||
|
||||
/* If we have a next shift, try to clear the blocks below this one and return the new address. */
|
||||
if (this->GetNextShift()) {
|
||||
const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
|
||||
offset = util::AlignDown(offset, diff);
|
||||
if (this->bitmap.ClearRange(offset, diff)) {
|
||||
return this->heap_address + (offset << this->GetShift());
|
||||
}
|
||||
}
|
||||
|
||||
/* We couldn't coalesce, or we're already as big as possible. */
|
||||
return Null<KVirtualAddress>;
|
||||
}
|
||||
|
||||
KVirtualAddress PopBlock() {
|
||||
/* Find a free block. */
|
||||
ssize_t soffset = this->bitmap.FindFreeBlock();
|
||||
if (soffset < 0) {
|
||||
return Null<KVirtualAddress>;
|
||||
}
|
||||
const size_t offset = static_cast<size_t>(soffset);
|
||||
|
||||
/* Update our tracking and return it. */
|
||||
this->bitmap.ClearBit(offset);
|
||||
return this->heap_address + (offset << this->GetShift());
|
||||
}
|
||||
public:
|
||||
static constexpr size_t CalculateMetadataOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) {
|
||||
const size_t cur_block_size = (u64(1) << cur_block_shift);
|
||||
const size_t next_block_size = (u64(1) << next_block_shift);
|
||||
const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
|
||||
return Bitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
|
||||
}
|
||||
};
|
||||
private:
|
||||
KVirtualAddress heap_address;
|
||||
size_t heap_size;
|
||||
size_t used_size;
|
||||
size_t num_blocks;
|
||||
Block blocks[NumMemoryBlockPageShifts];
|
||||
private:
|
||||
void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts);
|
||||
size_t GetNumFreePages() const;
|
||||
|
||||
void FreeBlock(KVirtualAddress block, s32 index);
|
||||
public:
|
||||
constexpr KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ }
|
||||
|
||||
constexpr KVirtualAddress GetAddress() const { return this->heap_address; }
|
||||
constexpr size_t GetSize() const { return this->heap_size; }
|
||||
constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
|
||||
constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; }
|
||||
|
||||
void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) {
|
||||
return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
|
||||
}
|
||||
|
||||
void UpdateUsedSize() {
|
||||
this->used_size = this->heap_size - (this->GetNumFreePages() * PageSize);
|
||||
}
|
||||
|
||||
KVirtualAddress AllocateBlock(s32 index);
|
||||
void Free(KVirtualAddress addr, size_t num_pages);
|
||||
private:
|
||||
static size_t CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
|
||||
public:
|
||||
static size_t CalculateMetadataOverheadSize(size_t region_size) {
|
||||
return CalculateMetadataOverheadSize(region_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,311 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_page_table_impl.hpp>
|
||||
#include <mesosphere/kern_k_light_lock.hpp>
|
||||
#include <mesosphere/kern_k_page_group.hpp>
|
||||
#include <mesosphere/kern_k_memory_manager.hpp>
|
||||
#include <mesosphere/kern_k_memory_layout.hpp>
|
||||
#include <mesosphere/kern_k_memory_block_manager.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Attributes applied to a page-table mapping operation (passed to Operate()). */
struct KPageProperties {
    KMemoryPermission perm; /* Access permissions for the mapping. */
    bool io;                /* presumably marks the mapping as device/IO memory — confirm against Operate() implementations. */
    bool uncached;          /* presumably requests uncached memory attributes — confirm. */
    bool non_contiguous;    /* presumably allows backing pages to be physically discontiguous — confirm. */
};
static_assert(std::is_trivial<KPageProperties>::value);
|
||||
|
||||
/* Architecture-independent core of a process/kernel page table; the arch layer */
/* supplies the pure-virtual Operate()/FinalizeUpdate() primitives.             */
class KPageTableBase {
    NON_COPYABLE(KPageTableBase);
    NON_MOVEABLE(KPageTableBase);
    public:
        using TraversalEntry   = KPageTableImpl::TraversalEntry;
        using TraversalContext = KPageTableImpl::TraversalContext;
    protected:
        /* Byte patterns used to fill freshly mapped memory, by purpose. */
        enum MemoryFillValue {
            MemoryFillValue_Zero  = 0,
            MemoryFillValue_Stack = 'X',
            MemoryFillValue_Ipc   = 'Y',
            MemoryFillValue_Heap  = 'Z',
        };

        /* Operation selector passed down to the arch-specific Operate(). */
        enum OperationType {
            OperationType_Map                           = 0,
            OperationType_MapGroup                      = 1,
            OperationType_Unmap                         = 2,
            OperationType_ChangePermissions             = 3,
            OperationType_ChangePermissionsAndRefresh   = 4,
            /* TODO: perm/attr operations */
        };

        static constexpr size_t MaxPhysicalMapAlignment = 1_GB;
        static constexpr size_t RegionAlignment         = 2_MB;
        static_assert(RegionAlignment == KernelAslrAlignment);
|
||||
|
||||
struct PageLinkedList {
|
||||
private:
|
||||
struct Node {
|
||||
Node *next;
|
||||
u8 buffer[PageSize - sizeof(Node *)];
|
||||
};
|
||||
static_assert(std::is_pod<Node>::value);
|
||||
private:
|
||||
Node *root;
|
||||
public:
|
||||
constexpr PageLinkedList() : root(nullptr) { /* ... */ }
|
||||
|
||||
void Push(Node *n) {
|
||||
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
|
||||
n->next = this->root;
|
||||
this->root = n;
|
||||
}
|
||||
|
||||
void Push(KVirtualAddress addr) {
|
||||
this->Push(GetPointer<Node>(addr));
|
||||
}
|
||||
|
||||
Node *Peek() const { return this->root; }
|
||||
|
||||
Node *Pop() {
|
||||
Node *r = this->root;
|
||||
this->root = this->root->next;
|
||||
return r;
|
||||
}
|
||||
};
|
||||
static_assert(std::is_trivially_destructible<PageLinkedList>::value);
|
||||
|
||||
/* Attribute bits ignored by default when checking memory state. */
static constexpr u32 DefaultMemoryIgnoreAttr = KMemoryAttribute_DontCareMask | KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;

/* Virtual address-space width in bits for the given process-creation flags. */
static constexpr size_t GetAddressSpaceWidth(ams::svc::CreateProcessFlag as_type) {
    switch (static_cast<ams::svc::CreateProcessFlag>(as_type & ams::svc::CreateProcessFlag_AddressSpaceMask)) {
        case ams::svc::CreateProcessFlag_AddressSpace64Bit:
            return 39;
        case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated:
            return 36;
        case ams::svc::CreateProcessFlag_AddressSpace32Bit:
        case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
            return 32;
        MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
    }
}
|
||||
private:
|
||||
class KScopedPageTableUpdater {
|
||||
private:
|
||||
KPageTableBase *page_table;
|
||||
PageLinkedList ll;
|
||||
public:
|
||||
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : page_table(pt), ll() { /* ... */ }
|
||||
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase &pt) : KScopedPageTableUpdater(std::addressof(pt)) { /* ... */ }
|
||||
ALWAYS_INLINE ~KScopedPageTableUpdater() { this->page_table->FinalizeUpdate(this->GetPageList()); }
|
||||
|
||||
PageLinkedList *GetPageList() { return std::addressof(this->ll); }
|
||||
};
|
||||
private:
    /* Address-space layout. NOTE: declaration order here must match the      */
    /* constructor's member-initializer list.                                 */
    KProcessAddress address_space_start;
    KProcessAddress address_space_end;
    KProcessAddress heap_region_start;
    KProcessAddress heap_region_end;
    KProcessAddress current_heap_end;
    KProcessAddress alias_region_start;
    KProcessAddress alias_region_end;
    KProcessAddress stack_region_start;
    KProcessAddress stack_region_end;
    KProcessAddress kernel_map_region_start;
    KProcessAddress kernel_map_region_end;
    KProcessAddress alias_code_region_start;
    KProcessAddress alias_code_region_end;
    KProcessAddress code_region_start;
    KProcessAddress code_region_end;
    /* Size limits. */
    size_t max_heap_size;
    size_t max_physical_memory_size;
    /* Locks: general_lock guards table state; mutable so const queries may lock. */
    mutable KLightLock general_lock;
    mutable KLightLock map_physical_memory_lock;
    /* Arch-specific table implementation and per-block bookkeeping. */
    KPageTableImpl impl;
    KMemoryBlockManager memory_block_manager;
    u32 allocate_option;
    u32 address_space_width;
    bool is_kernel;
    bool enable_aslr;
    KMemoryBlockSlabManager *memory_block_slab_manager;
    KBlockInfoManager *block_info_manager;
    /* Cached region lookups to avoid repeated KMemoryLayout tree searches. */
    const KMemoryRegion *cached_physical_linear_region;
    const KMemoryRegion *cached_physical_heap_region;
    const KMemoryRegion *cached_virtual_heap_region;
    /* Fill patterns for newly mapped heap/ipc/stack memory. */
    MemoryFillValue heap_fill_value;
    MemoryFillValue ipc_fill_value;
    MemoryFillValue stack_fill_value;
|
||||
public:
    /* Zero/default-initializes every member; initializer order mirrors the */
    /* declaration order above.                                             */
    constexpr KPageTableBase() :
        address_space_start(), address_space_end(), heap_region_start(), heap_region_end(), current_heap_end(),
        alias_region_start(), alias_region_end(), stack_region_start(), stack_region_end(), kernel_map_region_start(),
        kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(),
        max_heap_size(), max_physical_memory_size(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(),
        allocate_option(), address_space_width(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(),
        cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
        heap_fill_value(), ipc_fill_value(), stack_fill_value()
    {
        /* ... */
    }
|
||||
|
||||
NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
|
||||
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager);
|
||||
|
||||
void Finalize();
|
||||
|
||||
constexpr bool IsKernel() const { return this->is_kernel; }
|
||||
constexpr bool IsAslrEnabled() const { return this->enable_aslr; }
|
||||
|
||||
constexpr bool Contains(KProcessAddress addr) const {
|
||||
return this->address_space_start <= addr && addr <= this->address_space_end - 1;
|
||||
}
|
||||
|
||||
constexpr bool Contains(KProcessAddress addr, size_t size) const {
|
||||
return this->address_space_start <= addr && addr < addr + size && addr + size - 1 <= this->address_space_end - 1;
|
||||
}
|
||||
|
||||
KProcessAddress GetRegionAddress(KMemoryState state) const;
|
||||
size_t GetRegionSize(KMemoryState state) const;
|
||||
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
|
||||
protected:
|
||||
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
|
||||
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
|
||||
virtual void FinalizeUpdate(PageLinkedList *page_list) = 0;
|
||||
|
||||
KPageTableImpl &GetImpl() { return this->impl; }
|
||||
const KPageTableImpl &GetImpl() const { return this->impl; }
|
||||
|
||||
KBlockInfoManager *GetBlockInfoManager() const { return this->block_info_manager; }
|
||||
|
||||
bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }
|
||||
|
||||
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
|
||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region);
|
||||
}
|
||||
|
||||
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
|
||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, size, this->cached_physical_heap_region);
|
||||
}
|
||||
|
||||
bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
|
||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, this->cached_virtual_heap_region);
|
||||
}
|
||||
|
||||
bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
|
||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, size, this->cached_virtual_heap_region);
|
||||
}
|
||||
|
||||
bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
|
||||
return (this->address_space_start <= addr) && (num_pages <= (this->address_space_end - this->address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= this->address_space_end - 1);
|
||||
}
|
||||
private:
|
||||
constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 1 : 4; }
|
||||
ALWAYS_INLINE KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;
|
||||
|
||||
Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
|
||||
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
|
||||
Result CheckMemoryState(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
|
||||
return this->CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
|
||||
}
|
||||
|
||||
Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const;
|
||||
Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties);
|
||||
Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);
|
||||
|
||||
Result MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages);
|
||||
bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages);
|
||||
|
||||
NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
|
||||
public:
|
||||
/* Translate a process virtual address to its backing physical address via the page table impl. */
/* Returns true on success; *out receives the physical address. */
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const {
    return this->GetImpl().GetPhysicalAddress(out, virt_addr);
}
|
||||
|
||||
Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
|
||||
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
|
||||
Result SetHeapSize(KProcessAddress *out, size_t size);
|
||||
Result SetMaxHeapSize(size_t size);
|
||||
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const;
|
||||
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
|
||||
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
|
||||
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
|
||||
|
||||
/* Map num_pages of the given physical address into a free area of */
/* [region_start, region_start + region_num_pages * PageSize); *out_addr receives the chosen address. */
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
    return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm);
}
|
||||
|
||||
/* Map num_pages of the given physical address into the region associated with the memory state. */
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
    return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm);
}
|
||||
|
||||
/* Map num_pages into the region associated with the memory state; passes a null physical address */
/* with is_pa_valid=false, so the backing pages are presumably allocated by the callee — confirm in MapPages impl. */
Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
    return this->MapPages(out_addr, num_pages, PageSize, Null<KPhysicalAddress>, false, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm);
}
|
||||
|
||||
Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
|
||||
Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
|
||||
Result MapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm);
|
||||
Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state);
|
||||
public:
|
||||
/* Accessors for the start addresses of the managed regions. */
KProcessAddress GetAddressSpaceStart() const { return this->address_space_start; }
KProcessAddress GetHeapRegionStart() const { return this->heap_region_start; }
KProcessAddress GetAliasRegionStart() const { return this->alias_region_start; }
KProcessAddress GetStackRegionStart() const { return this->stack_region_start; }
KProcessAddress GetKernelMapRegionStart() const { return this->kernel_map_region_start; }

/* Accessors for the sizes of the managed regions (end - start, in bytes). */
size_t GetAddressSpaceSize() const { return this->address_space_end - this->address_space_start; }
size_t GetHeapRegionSize() const { return this->heap_region_end - this->heap_region_start; }
size_t GetAliasRegionSize() const { return this->alias_region_end - this->alias_region_start; }
size_t GetStackRegionSize() const { return this->stack_region_end - this->stack_region_start; }
size_t GetKernelMapRegionSize() const { return this->kernel_map_region_end - this->kernel_map_region_start; }
|
||||
public:
|
||||
/* Convert a physical address to its virtual address in the kernel's linear mapping. */
static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) {
    return KMemoryLayout::GetLinearVirtualAddress(addr);
}

/* Convert a linear-mapping virtual address back to its physical address. */
static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress addr) {
    return KMemoryLayout::GetLinearPhysicalAddress(addr);
}

/* Heap virtual<->physical translation; currently an alias of the linear mapping. */
static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
    return GetLinearVirtualAddress(addr);
}

static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
    return GetLinearPhysicalAddress(addr);
}

/* Page-table virtual<->physical translation; currently an alias of the linear mapping. */
static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
    return GetLinearVirtualAddress(addr);
}

static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) {
    return GetLinearPhysicalAddress(addr);
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,104 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_dynamic_slab_heap.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
namespace impl {
|
||||
|
||||
class PageTablePage {
|
||||
private:
|
||||
u8 buffer[PageSize];
|
||||
};
|
||||
static_assert(sizeof(PageTablePage) == PageSize);
|
||||
|
||||
}
|
||||
|
||||
class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage> {
|
||||
public:
|
||||
using RefCount = u16;
|
||||
static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
|
||||
static_assert(PageTableSize == PageSize);
|
||||
private:
|
||||
using BaseHeap = KDynamicSlabHeap<impl::PageTablePage>;
|
||||
private:
|
||||
RefCount *ref_counts;
|
||||
public:
|
||||
static constexpr size_t CalculateReferenceCountSize(size_t size) {
|
||||
return (size / PageSize) * sizeof(RefCount);
|
||||
}
|
||||
public:
|
||||
constexpr KPageTableManager() : BaseHeap(), ref_counts() { /* ... */ }
|
||||
private:
|
||||
void Initialize(RefCount *rc) {
|
||||
this->ref_counts = rc;
|
||||
for (size_t i = 0; i < this->GetSize() / PageSize; i++) {
|
||||
this->ref_counts[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr RefCount *GetRefCountPointer(KVirtualAddress addr) const {
|
||||
return std::addressof(this->ref_counts[(addr - this->GetAddress()) / PageSize]);
|
||||
}
|
||||
public:
|
||||
void Initialize(KDynamicPageManager *next_allocator, RefCount *rc) {
|
||||
BaseHeap::Initialize(next_allocator);
|
||||
this->Initialize(rc);
|
||||
}
|
||||
|
||||
void Initialize(KVirtualAddress memory, size_t sz, RefCount *rc) {
|
||||
BaseHeap::Initialize(memory, sz);
|
||||
this->Initialize(rc);
|
||||
}
|
||||
|
||||
KVirtualAddress Allocate() {
|
||||
return KVirtualAddress(BaseHeap::Allocate());
|
||||
}
|
||||
|
||||
void Free(KVirtualAddress addr) {
|
||||
BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
|
||||
}
|
||||
|
||||
RefCount GetRefCount(KVirtualAddress addr) const {
|
||||
MESOSPHERE_ASSERT(this->IsInRange(addr));
|
||||
return *this->GetRefCountPointer(addr);
|
||||
}
|
||||
|
||||
void Open(KVirtualAddress addr, int count) {
|
||||
MESOSPHERE_ASSERT(this->IsInRange(addr));
|
||||
|
||||
*this->GetRefCountPointer(addr) += count;
|
||||
|
||||
MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0);
|
||||
}
|
||||
|
||||
bool Close(KVirtualAddress addr, int count) {
|
||||
MESOSPHERE_ASSERT(this->IsInRange(addr));
|
||||
MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count);
|
||||
|
||||
*this->GetRefCountPointer(addr) -= count;
|
||||
return this->GetRefCount(addr) == 0;
|
||||
}
|
||||
|
||||
constexpr bool IsInPageTableHeap(KVirtualAddress addr) const {
|
||||
return this->IsInRange(addr);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
29
libraries/libmesosphere/include/mesosphere/kern_k_port.hpp
Normal file
29
libraries/libmesosphere/include/mesosphere/kern_k_port.hpp
Normal file
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {

    /* NOTE(review): presumably the kernel's IPC port object — only a stub exists at this point. */
    /* The traits macro registers KPort in the KAutoObject class hierarchy. */
    class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjectWithList> {
        MESOSPHERE_AUTOOBJECT_TRAITS(KPort, KAutoObject);
        public:
            /* TODO: This is a placeholder definition. */
    };

}
|
|
@ -0,0 +1,424 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/*
|
||||
TODO: C++20
|
||||
|
||||
template<typename T>
|
||||
concept KPriorityQueueAffinityMask = !std::is_reference<T>::value && requires (T &t) {
|
||||
{ t.GetAffinityMask() } -> std::convertible_to<u64>;
|
||||
{ t.SetAffinityMask(std::declval<u64>()) };
|
||||
|
||||
{ t.GetAffinity(std::declval<int32_t>()) } -> std::same_as<bool>;
|
||||
{ t.SetAffinity(std::declval<int32_t>(), std::declval<bool>()) };
|
||||
{ t.SetAll() };
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
concept KPriorityQueueMember = !std::is_reference<T>::value && requires (T &t) {
|
||||
{ typename T::QueueEntry() };
|
||||
{ (typename T::QueueEntry()).Initialize() };
|
||||
{ (typename T::QueueEntry()).SetPrev(std::addressof(t)) };
|
||||
{ (typename T::QueueEntry()).SetNext(std::addressof(t)) };
|
||||
{ (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
|
||||
{ (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
|
||||
{ t.GetPriorityQueueEntry(std::declval<s32>()) } -> std::same_as<typename T::QueueEntry &>;
|
||||
|
||||
{ t.GetAffinityMask() };
|
||||
{ typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() } -> KPriorityQueueAffinityMask;
|
||||
|
||||
{ t.GetActiveCore() } -> std::convertible_to<s32>;
|
||||
{ t.GetPriority() } -> std::convertible_to<s32>;
|
||||
};
|
||||
*/
|
||||
|
||||
|
||||
template<typename Member, size_t _NumCores, int LowestPriority, int HighestPriority> /* TODO C++20: requires KPriorityQueueMember<Member> */
|
||||
class KPriorityQueue {
|
||||
public:
|
||||
using AffinityMaskType = typename std::remove_cv<typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>::type;
|
||||
|
||||
static_assert(LowestPriority >= 0);
|
||||
static_assert(HighestPriority >= 0);
|
||||
static_assert(LowestPriority >= HighestPriority);
|
||||
static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
|
||||
static constexpr size_t NumCores = _NumCores;
|
||||
|
||||
static constexpr ALWAYS_INLINE bool IsValidCore(s32 core) {
|
||||
return 0 <= core && core < static_cast<s32>(NumCores);
|
||||
}
|
||||
|
||||
static constexpr ALWAYS_INLINE bool IsValidPriority(s32 priority) {
|
||||
return HighestPriority <= priority && priority <= LowestPriority + 1;
|
||||
}
|
||||
private:
|
||||
using Entry = typename Member::QueueEntry;
|
||||
public:
|
||||
class KPerCoreQueue {
|
||||
private:
|
||||
Entry root[NumCores];
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KPerCoreQueue() : root() {
|
||||
for (size_t i = 0; i < NumCores; i++) {
|
||||
this->root[i].Initialize();
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool PushBack(s32 core, Member *member) {
|
||||
/* Get the entry associated with the member. */
|
||||
Entry &member_entry = member->GetPriorityQueueEntry(core);
|
||||
|
||||
/* Get the entry associated with the end of the queue. */
|
||||
Member *tail = this->root[core].GetPrev();
|
||||
Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
|
||||
|
||||
/* Link the entries. */
|
||||
member_entry.SetPrev(tail);
|
||||
member_entry.SetNext(nullptr);
|
||||
tail_entry.SetNext(member);
|
||||
this->root[core].SetPrev(member);
|
||||
|
||||
return (tail == nullptr);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool PushFront(s32 core, Member *member) {
|
||||
/* Get the entry associated with the member. */
|
||||
Entry &member_entry = member->GetPriorityQueueEntry(core);
|
||||
|
||||
/* Get the entry associated with the front of the queue. */
|
||||
Member *head = this->root[core].GetNext();
|
||||
Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
|
||||
|
||||
/* Link the entries. */
|
||||
member_entry.SetPrev(nullptr);
|
||||
member_entry.SetNext(head);
|
||||
head_entry.SetPrev(member);
|
||||
this->root[core].SetNext(member);
|
||||
|
||||
return (head == nullptr);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool Remove(s32 core, Member *member) {
|
||||
/* Get the entry associated with the member. */
|
||||
Entry &member_entry = member->GetPriorityQueueEntry(core);
|
||||
|
||||
/* Get the entries associated with next and prev. */
|
||||
Member *prev = member_entry.GetPrev();
|
||||
Member *next = member_entry.GetNext();
|
||||
Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
|
||||
Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
|
||||
|
||||
/* Unlink. */
|
||||
prev_entry.SetNext(next);
|
||||
next_entry.SetPrev(prev);
|
||||
|
||||
return (this->GetFront(core) == nullptr);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetFront(s32 core) const {
|
||||
return this->root[core].GetNext();
|
||||
}
|
||||
};
|
||||
|
||||
class KPriorityQueueImpl {
|
||||
private:
|
||||
KPerCoreQueue queues[NumPriority];
|
||||
util::BitSet64<NumPriority> available_priorities[NumCores];
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ }
|
||||
|
||||
constexpr ALWAYS_INLINE void PushBack(s32 priority, s32 core, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
if (this->queues[priority].PushBack(core, member)) {
|
||||
this->available_priorities[core].SetBit(priority);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void PushFront(s32 priority, s32 core, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
if (this->queues[priority].PushFront(core, member)) {
|
||||
this->available_priorities[core].SetBit(priority);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void Remove(s32 priority, s32 core, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
if (this->queues[priority].Remove(core, member)) {
|
||||
this->available_priorities[core].ClearBit(priority);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetFront(s32 core) const {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
|
||||
const s32 priority = this->available_priorities[core].CountLeadingZero();
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
return this->queues[priority].GetFront(core);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetFront(s32 priority, s32 core) const {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
return this->queues[priority].GetFront(core);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetNext(s32 core, const Member *member) const {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
|
||||
Member *next = member->GetPriorityQueueEntry(core).GetNext();
|
||||
if (next == nullptr) {
|
||||
const s32 priority = this->available_priorities[core].GetNextSet(member->GetPriority());
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
next = this->queues[priority].GetFront(core);
|
||||
}
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void MoveToFront(s32 priority, s32 core, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
this->queues[priority].Remove(core, member);
|
||||
this->queues[priority].PushFront(core, member);
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *MoveToBack(s32 priority, s32 core, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidCore(core));
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
if (AMS_LIKELY(priority <= LowestPriority)) {
|
||||
this->queues[priority].Remove(core, member);
|
||||
this->queues[priority].PushBack(core, member);
|
||||
return this->queues[priority].GetFront(core);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
};
|
||||
private:
|
||||
KPriorityQueueImpl scheduled_queue;
|
||||
KPriorityQueueImpl suggested_queue;
|
||||
private:
|
||||
constexpr ALWAYS_INLINE void ClearAffinityBit(u64 &affinity, s32 core) {
|
||||
affinity &= ~(u64(1ul) << core);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE s32 GetNextCore(u64 &affinity) {
|
||||
const s32 core = __builtin_ctzll(static_cast<unsigned long long>(affinity));
|
||||
ClearAffinityBit(affinity, core);
|
||||
return core;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void PushBack(s32 priority, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
/* Push onto the scheduled queue for its core, if we can. */
|
||||
u64 affinity = member->GetAffinityMask().GetAffinityMask();
|
||||
if (const s32 core = member->GetActiveCore(); core >= 0) {
|
||||
this->scheduled_queue.PushBack(priority, core, member);
|
||||
ClearAffinityBit(affinity, core);
|
||||
}
|
||||
|
||||
/* And suggest the thread for all other cores. */
|
||||
while (affinity) {
|
||||
this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void PushFront(s32 priority, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
/* Push onto the scheduled queue for its core, if we can. */
|
||||
u64 affinity = member->GetAffinityMask().GetAffinityMask();
|
||||
if (const s32 core = member->GetActiveCore(); core >= 0) {
|
||||
this->scheduled_queue.PushFront(priority, core, member);
|
||||
ClearAffinityBit(affinity, core);
|
||||
}
|
||||
|
||||
/* And suggest the thread for all other cores. */
|
||||
/* Note: Nintendo pushes onto the back of the suggested queue, not the front. */
|
||||
while (affinity) {
|
||||
this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void Remove(s32 priority, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidPriority(priority));
|
||||
|
||||
/* Remove from the scheduled queue for its core. */
|
||||
u64 affinity = member->GetAffinityMask().GetAffinityMask();
|
||||
if (const s32 core = member->GetActiveCore(); core >= 0) {
|
||||
this->scheduled_queue.Remove(priority, core, member);
|
||||
ClearAffinityBit(affinity, core);
|
||||
}
|
||||
|
||||
/* Remove from the suggested queue for all other cores. */
|
||||
while (affinity) {
|
||||
this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
|
||||
}
|
||||
}
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ }
|
||||
|
||||
/* Getters. */
|
||||
constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core) const {
|
||||
return this->scheduled_queue.GetFront(core);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core, s32 priority) const {
|
||||
return this->scheduled_queue.GetFront(priority, core);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core) const {
|
||||
return this->suggested_queue.GetFront(core);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core, s32 priority) const {
|
||||
return this->suggested_queue.GetFront(priority, core);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetScheduledNext(s32 core, const Member *member) const {
|
||||
return this->scheduled_queue.GetNext(core, member);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetSuggestedNext(s32 core, const Member *member) const {
|
||||
return this->suggested_queue.GetNext(core, member);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Member *GetSamePriorityNext(s32 core, const Member *member) const {
|
||||
return member->GetPriorityQueueEntry(core).GetNext();
|
||||
}
|
||||
|
||||
/* Mutators. */
|
||||
constexpr ALWAYS_INLINE void PushBack(Member *member) {
|
||||
this->PushBack(member->GetPriority(), member);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void Remove(Member *member) {
|
||||
this->Remove(member->GetPriority(), member);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void MoveToScheduledFront(Member *member) {
|
||||
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KThread *MoveToScheduledBack(Member *member) {
|
||||
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
|
||||
}
|
||||
|
||||
/* First class fancy operations. */
|
||||
constexpr ALWAYS_INLINE void ChangePriority(s32 prev_priority, bool is_running, Member *member) {
|
||||
MESOSPHERE_ASSERT(IsValidPriority(prev_priority));
|
||||
|
||||
/* Remove the member from the queues. */
|
||||
const s32 new_priority = member->GetPriority();
|
||||
this->Remove(prev_priority, member);
|
||||
|
||||
/* And enqueue. If the member is running, we want to keep it running. */
|
||||
if (is_running) {
|
||||
this->PushFront(new_priority, member);
|
||||
} else {
|
||||
this->PushBack(new_priority, member);
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void ChangeAffinityMask(s32 prev_core, const AffinityMaskType &prev_affinity, Member *member) {
|
||||
/* Get the new information. */
|
||||
const s32 priority = member->GetPriority();
|
||||
const AffinityMaskType &new_affinity = member->GetAffinityMask();
|
||||
const s32 new_core = member->GetActiveCore();
|
||||
|
||||
/* Remove the member from all queues it was in before. */
|
||||
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
|
||||
if (prev_affinity.GetAffinity(core)) {
|
||||
if (core == prev_core) {
|
||||
this->scheduled_queue.Remove(priority, core, member);
|
||||
} else {
|
||||
this->suggested_queue.Remove(priority, core, member);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* And add the member to all queues it should be in now. */
|
||||
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
|
||||
if (new_affinity.GetAffinity(core)) {
|
||||
if (core == new_core) {
|
||||
this->scheduled_queue.PushBack(priority, core, member);
|
||||
} else {
|
||||
this->suggested_queue.PushBack(priority, core, member);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void ChangeCore(s32 prev_core, Member *member, bool to_front = false) {
|
||||
/* Get the new information. */
|
||||
const s32 new_core = member->GetActiveCore();
|
||||
const s32 priority = member->GetPriority();
|
||||
|
||||
/* We don't need to do anything if the core is the same. */
|
||||
if (prev_core != new_core) {
|
||||
/* Remove from the scheduled queue for the previous core. */
|
||||
if (prev_core >= 0) {
|
||||
this->scheduled_queue.Remove(priority, prev_core, member);
|
||||
}
|
||||
|
||||
/* Remove from the suggested queue and add to the scheduled queue for the new core. */
|
||||
if (new_core >= 0) {
|
||||
this->suggested_queue.Remove(priority, prev_core, member);
|
||||
if (to_front) {
|
||||
this->scheduled_queue.PushFront(priority, new_core, member);
|
||||
} else {
|
||||
this->scheduled_queue.PushBack(priority, new_core, member);
|
||||
}
|
||||
}
|
||||
|
||||
/* Add to the suggested queue for the previous core. */
|
||||
if (prev_core >= 0) {
|
||||
this->suggested_queue.PushBack(priority, prev_core, member);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
}
|
219
libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
Normal file
219
libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
Normal file
|
@ -0,0 +1,219 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
#include <mesosphere/kern_k_handle_table.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
#include <mesosphere/kern_k_thread_local_page.hpp>
|
||||
#include <mesosphere/kern_k_shared_memory_info.hpp>
|
||||
#include <mesosphere/kern_k_worker_task.hpp>
|
||||
#include <mesosphere/kern_select_page_table.hpp>
|
||||
#include <mesosphere/kern_k_condition_variable.hpp>
|
||||
#include <mesosphere/kern_k_address_arbiter.hpp>
|
||||
#include <mesosphere/kern_k_capabilities.hpp>
|
||||
#include <mesosphere/kern_k_wait_object.hpp>
|
||||
#include <mesosphere/kern_k_dynamic_slab_heap.hpp>
|
||||
#include <mesosphere/kern_k_page_table_manager.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>, public KWorkerTask {
|
||||
MESOSPHERE_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
|
||||
public:
|
||||
enum State {
|
||||
State_Created = ams::svc::ProcessState_Created,
|
||||
State_CreatedAttached = ams::svc::ProcessState_CreatedAttached,
|
||||
State_Running = ams::svc::ProcessState_Running,
|
||||
State_Crashed = ams::svc::ProcessState_Crashed,
|
||||
State_RunningAttached = ams::svc::ProcessState_RunningAttached,
|
||||
State_Terminating = ams::svc::ProcessState_Terminating,
|
||||
State_Terminated = ams::svc::ProcessState_Terminated,
|
||||
State_DebugBreak = ams::svc::ProcessState_DebugBreak,
|
||||
};
|
||||
|
||||
using ThreadList = util::IntrusiveListMemberTraits<&KThread::process_list_node>::ListType;
|
||||
private:
|
||||
using SharedMemoryInfoList = util::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
|
||||
using TLPTree = util::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
|
||||
using TLPIterator = TLPTree::iterator;
|
||||
private:
|
||||
KProcessPageTable page_table{};
|
||||
std::atomic<size_t> used_kernel_memory_size{};
|
||||
TLPTree fully_used_tlp_tree{};
|
||||
TLPTree partially_used_tlp_tree{};
|
||||
s32 ideal_core_id{};
|
||||
void *attached_object{};
|
||||
KResourceLimit *resource_limit{};
|
||||
KVirtualAddress system_resource_address{};
|
||||
size_t system_resource_num_pages{};
|
||||
size_t memory_release_hint{};
|
||||
State state{};
|
||||
KLightLock lock{};
|
||||
KLightLock list_lock{};
|
||||
KConditionVariable cond_var{};
|
||||
KAddressArbiter address_arbiter{};
|
||||
u64 entropy[4]{};
|
||||
bool is_signaled{};
|
||||
bool is_initialized{};
|
||||
bool is_application{};
|
||||
char name[13]{};
|
||||
std::atomic<u16> num_threads{};
|
||||
u16 peak_num_threads{};
|
||||
u32 flags{};
|
||||
KMemoryManager::Pool memory_pool{};
|
||||
s64 schedule_count{};
|
||||
KCapabilities capabilities{};
|
||||
ams::svc::ProgramId program_id{};
|
||||
u64 process_id{};
|
||||
s64 creation_time{};
|
||||
KProcessAddress code_address{};
|
||||
size_t code_size{};
|
||||
size_t main_thread_stack_size{};
|
||||
size_t max_process_memory{};
|
||||
u32 version{};
|
||||
KHandleTable handle_table{};
|
||||
KProcessAddress plr_address{};
|
||||
KThread *exception_thread{};
|
||||
ThreadList thread_list{};
|
||||
SharedMemoryInfoList shared_memory_list{};
|
||||
bool is_suspended{};
|
||||
bool is_jit_debug{};
|
||||
ams::svc::DebugEvent jit_debug_event_type{};
|
||||
ams::svc::DebugException jit_debug_exception_type{};
|
||||
uintptr_t jit_debug_params[4]{};
|
||||
u64 jit_debug_thread_id{};
|
||||
KWaitObject wait_object{};
|
||||
KThread *running_threads[cpu::NumCores]{};
|
||||
u64 running_thread_idle_counts[cpu::NumCores]{};
|
||||
KThread *pinned_threads[cpu::NumCores]{};
|
||||
std::atomic<s32> num_created_threads{};
|
||||
std::atomic<s64> cpu_time{};
|
||||
std::atomic<s64> num_process_switches{};
|
||||
std::atomic<s64> num_thread_switches{};
|
||||
std::atomic<s64> num_fpu_switches{};
|
||||
std::atomic<s64> num_supervisor_calls{};
|
||||
std::atomic<s64> num_ipc_messages{};
|
||||
std::atomic<s64> num_ipc_replies{};
|
||||
std::atomic<s64> num_ipc_receives{};
|
||||
KDynamicPageManager dynamic_page_manager{};
|
||||
KMemoryBlockSlabManager memory_block_slab_manager{};
|
||||
KBlockInfoManager block_info_manager{};
|
||||
KPageTableManager page_table_manager{};
|
||||
private:
|
||||
Result Initialize(const ams::svc::CreateProcessParameter ¶ms);
|
||||
public:
|
||||
constexpr KProcess() { /* ... */ }
|
||||
virtual ~KProcess() { /* ... */ }
|
||||
|
||||
Result Initialize(const ams::svc::CreateProcessParameter ¶ms, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool);
|
||||
|
||||
constexpr const char *GetName() const { return this->name; }
|
||||
|
||||
constexpr u64 GetProcessId() const { return this->process_id; }
|
||||
|
||||
constexpr u64 GetCoreMask() const { return this->capabilities.GetCoreMask(); }
|
||||
constexpr u64 GetPriorityMask() const { return this->capabilities.GetPriorityMask(); }
|
||||
|
||||
constexpr void SetIdealCoreId(s32 core_id) { this->ideal_core_id = core_id; }
|
||||
|
||||
constexpr bool Is64Bit() const { return this->flags & ams::svc::CreateProcessFlag_Is64Bit; }
|
||||
|
||||
constexpr KProcessAddress GetEntryPoint() const { return this->code_address; }
|
||||
|
||||
constexpr bool IsSuspended() const {
|
||||
return this->is_suspended;
|
||||
}
|
||||
|
||||
/* Get the thread pinned on the given core, or nullptr if none is pinned there. */
KThread *GetPreemptionStatePinnedThread(s32 core_id) const {
    MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
    return this->pinned_threads[core_id];
}
|
||||
|
||||
void CopySvcPermissionsTo(KThread::StackParameters &sp) {
|
||||
this->capabilities.CopySvcPermissionsTo(sp);
|
||||
}
|
||||
|
||||
constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; }
|
||||
|
||||
bool ReserveResource(ams::svc::LimitableResource which, s64 value);
|
||||
bool ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout);
|
||||
void ReleaseResource(ams::svc::LimitableResource which, s64 value);
|
||||
void ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint);
|
||||
|
||||
constexpr KProcessPageTable &GetPageTable() { return this->page_table; }
|
||||
constexpr const KProcessPageTable &GetPageTable() const { return this->page_table; }
|
||||
|
||||
constexpr KHandleTable &GetHandleTable() { return this->handle_table; }
|
||||
constexpr const KHandleTable &GetHandleTable() const { return this->handle_table; }
|
||||
|
||||
Result CreateThreadLocalRegion(KProcessAddress *out);
|
||||
void *GetThreadLocalRegionPointer(KProcessAddress addr);
|
||||
|
||||
void AddCpuTime(s64 diff) { this->cpu_time += diff; }
|
||||
void IncrementScheduledCount() { ++this->schedule_count; }
|
||||
|
||||
void IncrementThreadCount();
|
||||
void DecrementThreadCount();
|
||||
|
||||
void RegisterThread(KThread *thread);
|
||||
void UnregisterThread(KThread *thread);
|
||||
|
||||
Result Run(s32 priority, size_t stack_size);
|
||||
|
||||
void SetPreemptionState();
|
||||
|
||||
static void Switch(KProcess *cur_process, KProcess *next_process) {
|
||||
/* Set the current process pointer. */
|
||||
SetCurrentProcess(next_process);
|
||||
|
||||
/* Update the current page table. */
|
||||
if (next_process) {
|
||||
next_process->GetPageTable().Activate(next_process->GetProcessId());
|
||||
} else {
|
||||
Kernel::GetKernelPageTable().Activate();
|
||||
}
|
||||
}
|
||||
public:
|
||||
/* Overridden parent functions. */
|
||||
virtual bool IsInitialized() const override { return this->is_initialized; }
|
||||
|
||||
static void PostDestroy(uintptr_t arg) { /* ... */ }
|
||||
|
||||
virtual void Finalize() override;
|
||||
|
||||
virtual u64 GetId() const override { return this->GetProcessId(); }
|
||||
|
||||
virtual bool IsSignaled() const override {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
||||
return this->is_signaled;
|
||||
}
|
||||
|
||||
virtual void DoWorkerTask() override;
|
||||
private:
|
||||
void ChangeState(State new_state) {
|
||||
if (this->state != new_state) {
|
||||
this->state = new_state;
|
||||
this->is_signaled = true;
|
||||
this->NotifyAvailable();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KEvent;
|
||||
|
||||
/* Waitable event object; tracks the signaled state on behalf of a parent KEvent. */
class KReadableEvent : public KSynchronizationObject {
    MESOSPHERE_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
    private:
        bool is_signaled;        /* Whether the event is currently signaled. */
        KEvent *parent_event;    /* Owning KEvent, set by Initialize(). Not owned by this object. */
    public:
        constexpr explicit KReadableEvent() : KSynchronizationObject(), is_signaled(), parent_event() { MESOSPHERE_ASSERT_THIS(); }
        virtual ~KReadableEvent() { MESOSPHERE_ASSERT_THIS(); }

        /* Binds this readable event to its parent and clears the signaled state. */
        constexpr void Initialize(KEvent *parent) {
            MESOSPHERE_ASSERT_THIS();
            this->is_signaled = false;
            this->parent_event = parent;
        }

        constexpr KEvent *GetParent() const { return this->parent_event; }

        /* Overridden synchronization-object interface (defined out of line). */
        virtual bool IsSignaled() const override;
        virtual void Destroy() override;

        /* Event state transitions (defined out of line). */
        virtual Result Signal();
        virtual Result Clear();
        virtual Result Reset();
};
|
||||
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_light_lock.hpp>
|
||||
#include <mesosphere/kern_k_light_condition_variable.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Tracks per-category resource limits (threads, memory, events, ...) together with */
/* current usage, and supports blocking reservations that wait for resources to free up. */
class KResourceLimit final : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit, KAutoObjectWithList> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
    private:
        s64 limit_values[ams::svc::LimitableResource_Count];    /* Maximum allowed value per resource. */
        s64 current_values[ams::svc::LimitableResource_Count];  /* Currently reserved amount per resource. */
        s64 current_hints[ams::svc::LimitableResource_Count];   /* NOTE(review): presumably a usage hint updated by Release(which, value, hint) — confirm against the out-of-line implementation. */
        mutable KLightLock lock;                                /* Mutable so const accessors can take it. */
        s32 waiter_count;                                       /* Threads blocked in a timed Reserve(). */
        KLightConditionVariable cond_var;                       /* Paired with lock for blocking reservations. */
    public:
        constexpr ALWAYS_INLINE KResourceLimit() : limit_values(), current_values(), current_hints(), lock(), waiter_count(), cond_var() { /* ... */ }
        virtual ~KResourceLimit() { /* ... */ }

        static ALWAYS_INLINE void PostDestroy(uintptr_t arg) { /* ... */ }

        void Initialize();
        virtual void Finalize() override;

        /* Accessors (defined out of line). */
        s64 GetLimitValue(ams::svc::LimitableResource which) const;
        s64 GetCurrentValue(ams::svc::LimitableResource which) const;
        s64 GetFreeValue(ams::svc::LimitableResource which) const;

        Result SetLimitValue(ams::svc::LimitableResource which, s64 value);

        /* Reserve/release `value` units of a resource; the timeout overload may block. */
        bool Reserve(ams::svc::LimitableResource which, s64 value);
        bool Reserve(ams::svc::LimitableResource which, s64 value, s64 timeout);
        void Release(ams::svc::LimitableResource which, s64 value);
        void Release(ams::svc::LimitableResource which, s64 value, s64 hint);
};
|
||||
|
||||
}
|
171
libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
Normal file
171
libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
Normal file
|
@ -0,0 +1,171 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
#include <mesosphere/kern_k_priority_queue.hpp>
|
||||
#include <mesosphere/kern_k_scheduler_lock.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Global scheduler priority queue type: per-core queues spanning the full svc thread priority range. */
using KSchedulerPriorityQueue = KPriorityQueue<KThread, cpu::NumCores, ams::svc::LowestThreadPriority, ams::svc::HighestThreadPriority>;
static_assert(std::is_same<KSchedulerPriorityQueue::AffinityMaskType, KAffinityMask>::value);
static_assert(KSchedulerPriorityQueue::NumCores == cpu::NumCores);
static_assert(KSchedulerPriorityQueue::NumPriority == BITSIZEOF(u64));

class KScopedSchedulerLock;
class KScopedSchedulerLockAndSleep;
|
||||
|
||||
/*
 * Per-core scheduler. One instance exists per CPU core; all instances share a
 * single global scheduler lock and a single global priority queue.
 */
class KScheduler {
    NON_COPYABLE(KScheduler);
    NON_MOVEABLE(KScheduler);
    public:
        using LockType = KAbstractSchedulerLock<KScheduler>;

        /* NOTE(review): svc priorities are numeric (the asserts below bound the svc range by */
        /* this value); threads beyond this bound presumably may not migrate cores — confirm. */
        static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
        static_assert(ams::svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
        static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);

        /* Per-core scheduling bookkeeping, consulted on every reschedule. */
        struct SchedulingState {
            std::atomic<bool> needs_scheduling;     /* Set when this core must perform a context switch. */
            bool interrupt_task_thread_runnable;
            bool should_count_idle;
            u64 idle_count;
            KThread *highest_priority_thread;       /* Thread selected to run next on this core. */
            void *idle_thread_stack;
        };
    private:
        friend class KScopedSchedulerLock;
        friend class KScopedSchedulerLockAndSleep;
        static bool s_scheduler_update_needed;              /* Global flag: the priority queue changed under the lock. */
        static LockType s_scheduler_lock;                   /* Global scheduler lock shared by all cores. */
        static KSchedulerPriorityQueue s_priority_queue;    /* Global thread priority queue. */
    private:
        SchedulingState state;
        bool is_active;
        s32 core_id;                    /* Core this scheduler instance belongs to. */
        KThread *prev_thread;
        s64 last_context_switch_time;
        KThread *idle_thread;
    public:
        constexpr KScheduler()
            : state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr)
        {
            this->state.needs_scheduling = true;
            this->state.interrupt_task_thread_runnable = false;
            this->state.should_count_idle = false;
            this->state.idle_count = 0;
            this->state.idle_thread_stack = nullptr;
            this->state.highest_priority_thread = nullptr;
        }

        NOINLINE void Initialize(KThread *idle_thread);
        NOINLINE void Activate();

        /* Requests a reschedule from interrupt context, performing it immediately when dispatch is enabled. */
        ALWAYS_INLINE void RequestScheduleOnInterrupt() {
            SetSchedulerUpdateNeeded();

            if (CanSchedule()) {
                this->ScheduleOnInterrupt();
            }
        }
    private:
        /* Static private API. */
        static ALWAYS_INLINE bool IsSchedulerUpdateNeeded() { return s_scheduler_update_needed; }
        static ALWAYS_INLINE void SetSchedulerUpdateNeeded() { s_scheduler_update_needed = true; }
        static ALWAYS_INLINE void ClearSchedulerUpdateNeeded() { s_scheduler_update_needed = false; }
        static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; }

        static NOINLINE u64 UpdateHighestPriorityThreadsImpl();
    public:
        /* Static public API. */
        static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; }
        static ALWAYS_INLINE bool IsSchedulerLockedByCurrentThread() { return s_scheduler_lock.IsLockedByCurrentThread(); }

        static NOINLINE void SetInterruptTaskThreadRunnable();

        static ALWAYS_INLINE void DisableScheduling() {
            MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0);
            GetCurrentThread().DisableDispatch();
        }

        /* Re-enables dispatch; on the outermost enable, reschedules this core and any others that need it. */
        static NOINLINE void EnableScheduling(u64 cores_needing_scheduling) {
            MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 1);

            if (GetCurrentThread().GetDisableDispatchCount() > 1) {
                GetCurrentThread().EnableDispatch();
            } else {
                GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling);
                GetCurrentScheduler().RescheduleCurrentCore();
            }
        }

        /* Returns the set of cores needing scheduling, or 0 when no update was pending. */
        static ALWAYS_INLINE u64 UpdateHighestPriorityThreads() {
            if (IsSchedulerUpdateNeeded()) {
                return UpdateHighestPriorityThreadsImpl();
            } else {
                return 0;
            }
        }

        /* Priority-queue maintenance hooks (defined out of line). */
        static NOINLINE void OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state);
        static NOINLINE void OnThreadPriorityChanged(KThread *thread, s32 old_priority);
        static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core);

        /* TODO: Yield operations */
        static NOINLINE void RotateScheduledQueue(s32 priority, s32 core_id);
    private:
        /* Instanced private API. */
        void ScheduleImpl();
        void SwitchThread(KThread *next_thread);

        /* Performs a schedule; requires dispatch disabled exactly once, on this scheduler's own core. */
        ALWAYS_INLINE void Schedule() {
            MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
            MESOSPHERE_ASSERT(this->core_id == GetCurrentCoreId());

            this->ScheduleImpl();
        }

        ALWAYS_INLINE void ScheduleOnInterrupt() {
            KScopedDisableDispatch dd;
            this->Schedule();
        }

        void RescheduleOtherCores(u64 cores_needing_scheduling);

        ALWAYS_INLINE void RescheduleCurrentCore() {
            MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
            {
                /* Disable interrupts, and then context switch. */
                KScopedInterruptDisable intr_disable;
                ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };

                if (this->state.needs_scheduling) {
                    Schedule();
                }
            }
        }

        NOINLINE u64 UpdateHighestPriorityThread(KThread *thread);
};
|
||||
|
||||
/* RAII holder for the global scheduler lock. */
class KScopedSchedulerLock : KScopedLock<KScheduler::LockType> {
    public:
        explicit ALWAYS_INLINE KScopedSchedulerLock() : KScopedLock(KScheduler::s_scheduler_lock) { /* ... */ }
        ALWAYS_INLINE ~KScopedSchedulerLock() { /* ... */ }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_spin_lock.hpp>
|
||||
#include <mesosphere/kern_k_current_context.hpp>
|
||||
#include <mesosphere/kern_k_scoped_lock.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KThread;
|
||||
|
||||
/*
|
||||
TODO: C++20
|
||||
|
||||
template<typename T>
|
||||
concept KSchedulerLockable = !std::is_reference<T>::value && requires {
|
||||
{ T::DisableScheduling() } -> std::same_as<void>;
|
||||
{ T::EnableScheduling(std::declval<u64>()) } -> std::same_as<void>;
|
||||
{ T::UpdateHighestPriorityThreads() } -> std::convertible_to<u64>;
|
||||
};
|
||||
|
||||
*/
|
||||
|
||||
/*
 * Reentrant lock that disables scheduling while held and triggers a rescheduling
 * pass on final release. SchedulerType supplies the static DisableScheduling /
 * EnableScheduling / UpdateHighestPriorityThreads hooks.
 */
template<typename SchedulerType> /* TODO C++20: requires KSchedulerLockable<SchedulerType> */
class KAbstractSchedulerLock {
    private:
        KAlignedSpinLock spin_lock;    /* Underlying spinlock. */
        s32 lock_count;                /* Recursion depth for the owning thread. */
        KThread *owner_thread;         /* Current owner, or nullptr when unlocked. */
    public:
        constexpr ALWAYS_INLINE KAbstractSchedulerLock() : spin_lock(), lock_count(0), owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }

        ALWAYS_INLINE bool IsLockedByCurrentThread() const {
            MESOSPHERE_ASSERT_THIS();

            return this->owner_thread == GetCurrentThreadPointer();
        }

        ALWAYS_INLINE void Lock() {
            MESOSPHERE_ASSERT_THIS();

            if (this->IsLockedByCurrentThread()) {
                /* If we already own the lock, we can just increment the count. */
                MESOSPHERE_ASSERT(this->lock_count > 0);
                this->lock_count++;
            } else {
                /* Otherwise, we want to disable scheduling and acquire the spinlock. */
                /* NOTE: scheduling is disabled BEFORE taking the spinlock, mirroring the */
                /* reverse order in Unlock(). */
                SchedulerType::DisableScheduling();
                this->spin_lock.Lock();

                /* For debug, ensure that our state is valid. */
                MESOSPHERE_ASSERT(this->lock_count == 0);
                MESOSPHERE_ASSERT(this->owner_thread == nullptr);

                /* Increment count, take ownership. */
                this->lock_count = 1;
                this->owner_thread = GetCurrentThreadPointer();
            }
        }

        ALWAYS_INLINE void Unlock() {
            MESOSPHERE_ASSERT_THIS();
            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
            MESOSPHERE_ASSERT(this->lock_count > 0);

            /* Release an instance of the lock. */
            if ((--this->lock_count) == 0) {
                /* We're no longer going to hold the lock. Take note of what cores need scheduling. */
                const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads();

                /* Note that we no longer hold the lock, and unlock the spinlock. */
                this->owner_thread = nullptr;
                this->spin_lock.Unlock();

                /* Enable scheduling, and perform a rescheduling operation. */
                SchedulerType::EnableScheduling(cores_needing_scheduling);
            }
        }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/*
|
||||
TODO: C++20
|
||||
|
||||
template<typename T>
|
||||
concept KLockable = !std::is_reference<T>::value && requires (T &t) {
|
||||
{ t.Lock() } -> std::same_as<void>;
|
||||
{ t.Unlock() } -> std::same_as<void>;
|
||||
};
|
||||
|
||||
*/
|
||||
|
||||
template<typename T> /* TODO C++20: requires KLockable<T> */
|
||||
class KScopedLock {
|
||||
NON_COPYABLE(KScopedLock);
|
||||
NON_MOVEABLE(KScopedLock);
|
||||
private:
|
||||
T *lock_ptr;
|
||||
public:
|
||||
explicit ALWAYS_INLINE KScopedLock(T *l) : lock_ptr(l) { this->lock_ptr->Lock(); }
|
||||
explicit ALWAYS_INLINE KScopedLock(T &l) : KScopedLock(std::addressof(l)) { /* ... */ }
|
||||
ALWAYS_INLINE ~KScopedLock() { this->lock_ptr->Unlock(); }
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_resource_limit.hpp>
|
||||
#include <mesosphere/kern_k_process.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KScopedResourceReservation {
|
||||
private:
|
||||
KResourceLimit *limit;
|
||||
s64 value;
|
||||
ams::svc::LimitableResource resource;
|
||||
bool succeeded;
|
||||
public:
|
||||
ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : limit(l), value(v), resource(r) {
|
||||
if (this->limit && this->value) {
|
||||
this->succeeded = this->limit->Reserve(this->resource, this->value, timeout);
|
||||
} else {
|
||||
this->succeeded = true;
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : limit(l), value(v), resource(r) {
|
||||
if (this->limit && this->value) {
|
||||
this->succeeded = this->limit->Reserve(this->resource, this->value);
|
||||
} else {
|
||||
this->succeeded = true;
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v, s64 t) : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) { /* ... */ }
|
||||
ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v = 1) : KScopedResourceReservation(p->GetResourceLimit(), r, v) { /* ... */ }
|
||||
|
||||
ALWAYS_INLINE ~KScopedResourceReservation() {
|
||||
if (this->limit && this->value && this->succeeded) {
|
||||
this->limit->Release(this->resource, this->value);
|
||||
}
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void Commit() {
|
||||
this->limit = nullptr;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool Succeeded() const {
|
||||
return this->succeeded;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_scheduler.hpp>
|
||||
#include <mesosphere/kern_select_hardware_timer.hpp>
|
||||
#include <mesosphere/kern_kernel.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KScopedSchedulerLockAndSleep {
|
||||
private:
|
||||
s64 timeout_tick;
|
||||
KThread *thread;
|
||||
KHardwareTimer *timer;
|
||||
public:
|
||||
explicit ALWAYS_INLINE KScopedSchedulerLockAndSleep(KHardwareTimer **out_timer, KThread *t, s64 timeout) : timeout_tick(timeout), thread(t) {
|
||||
/* Lock the scheduler. */
|
||||
KScheduler::s_scheduler_lock.Lock();
|
||||
|
||||
/* Set our timer only if the absolute time is positive. */
|
||||
this->timer = (this->timeout_tick > 0) ? std::addressof(Kernel::GetHardwareTimer()) : nullptr;
|
||||
|
||||
*out_timer = this->timer;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE ~KScopedSchedulerLockAndSleep() {
|
||||
/* Register the sleep. */
|
||||
if (this->timeout_tick > 0) {
|
||||
this->timer->RegisterAbsoluteTask(this->thread, this->timeout_tick);
|
||||
}
|
||||
|
||||
/* Unlock the scheduler. */
|
||||
KScheduler::s_scheduler_lock.Unlock();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void CancelSleep() {
|
||||
this->timeout_tick = 0;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* IPC session object (not yet implemented). */
class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KSession, KAutoObject);
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Slab-allocated IPC session request, linkable into an intrusive list (not yet implemented). */
class KSessionRequest final : public KSlabAllocated<KSessionRequest>, public KAutoObject, public util::IntrusiveListBaseNode<KSessionRequest> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Shared memory object (not yet implemented). */
class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KSharedMemory;
|
||||
|
||||
/*
 * Reference-counted bookkeeping record associating a KSharedMemory with a
 * holder. Close() reports when the last reference is dropped.
 */
class KSharedMemoryInfo : public KSlabAllocated<KSharedMemoryInfo>, public util::IntrusiveListBaseNode<KSharedMemoryInfo> {
    private:
        KSharedMemory *mem;     /* Tracked shared memory object; not owned. */
        size_t open_count;      /* Number of outstanding Open() calls. */
    public:
        constexpr KSharedMemoryInfo() : mem(), open_count() { /* ... */ }
        ~KSharedMemoryInfo() { /* ... */ }

        /* Associates this record with a shared memory object, resetting the count. */
        constexpr void Initialize(KSharedMemory *m) {
            MESOSPHERE_ASSERT_THIS();
            this->mem = m;
            this->open_count = 0;
        }

        constexpr void Open() {
            ++this->open_count;
            MESOSPHERE_ASSERT(this->open_count > 0);
        }

        /* Returns true when the final reference has been released. */
        constexpr bool Close() {
            MESOSPHERE_ASSERT(this->open_count > 0);
            --this->open_count;
            return this->open_count == 0;
        }

        constexpr KSharedMemory *GetSharedMemory() const { return this->mem; }
        constexpr size_t GetReferenceCount() const { return this->open_count; }
};
|
||||
|
||||
}
|
192
libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp
Normal file
192
libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp
Normal file
|
@ -0,0 +1,192 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_typed_address.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
namespace impl {
|
||||
|
||||
/*
 * Lock-free intrusive free-list allocator for fixed-size slab objects.
 * Freed objects themselves store the list links (struct Node).
 */
class KSlabHeapImpl {
    NON_COPYABLE(KSlabHeapImpl);
    NON_MOVEABLE(KSlabHeapImpl);
    public:
        struct Node {
            Node *next;
        };
    private:
        std::atomic<Node *> head;    /* Top of the free list; nullptr when exhausted. */
        size_t obj_size;             /* Size in bytes of each slab object. */
    public:
        constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { MESOSPHERE_ASSERT_THIS(); }

        /* Records the object size; the free list is populated afterwards by the owner Free()ing each object. */
        void Initialize(size_t size) {
            MESOSPHERE_INIT_ABORT_UNLESS(this->head == nullptr);
            this->obj_size = size;
        }

        Node *GetHead() const {
            return this->head;
        }

        size_t GetObjectSize() const {
            return this->obj_size;
        }

        /* Pops the free-list head via CAS; returns nullptr when the slab is exhausted. */
        /* NOTE(review): ret->next is read without hazard/tag protection; this appears safe only */
        /* because slab memory is never returned to a general allocator (ABA hazard) — confirm. */
        void *Allocate() {
            MESOSPHERE_ASSERT_THIS();

            Node *ret = this->head.load();

            do {
                if (AMS_UNLIKELY(ret == nullptr)) {
                    break;
                }
            } while (!this->head.compare_exchange_weak(ret, ret->next));

            return ret;
        }

        /* Pushes an object back onto the free list via CAS. */
        void Free(void *obj) {
            MESOSPHERE_ASSERT_THIS();

            Node *node = reinterpret_cast<Node *>(obj);

            Node *cur_head = this->head.load();
            do {
                node->next = cur_head;
            } while (!this->head.compare_exchange_weak(cur_head, node));
        }
};
|
||||
|
||||
}
|
||||
|
||||
class KSlabHeapBase {
|
||||
NON_COPYABLE(KSlabHeapBase);
|
||||
NON_MOVEABLE(KSlabHeapBase);
|
||||
private:
|
||||
using Impl = impl::KSlabHeapImpl;
|
||||
private:
|
||||
Impl impl;
|
||||
uintptr_t peak;
|
||||
uintptr_t start;
|
||||
uintptr_t end;
|
||||
private:
|
||||
ALWAYS_INLINE Impl *GetImpl() {
|
||||
return std::addressof(this->impl);
|
||||
}
|
||||
ALWAYS_INLINE const Impl *GetImpl() const {
|
||||
return std::addressof(this->impl);
|
||||
}
|
||||
public:
|
||||
constexpr KSlabHeapBase() : impl(), peak(0), start(0), end(0) { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
ALWAYS_INLINE bool Contains(uintptr_t address) const {
|
||||
return this->start <= address && address < this->end;
|
||||
}
|
||||
|
||||
void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Ensure we don't initialize a slab using null memory. */
|
||||
MESOSPHERE_ABORT_UNLESS(memory != nullptr);
|
||||
|
||||
/* Initialize the base allocator. */
|
||||
this->GetImpl()->Initialize(obj_size);
|
||||
|
||||
/* Set our tracking variables. */
|
||||
const size_t num_obj = (memory_size / obj_size);
|
||||
this->start = reinterpret_cast<uintptr_t>(memory);
|
||||
this->end = this->start + num_obj * obj_size;
|
||||
this->peak = this->start;
|
||||
|
||||
/* Free the objects. */
|
||||
u8 *cur = reinterpret_cast<u8 *>(this->end);
|
||||
|
||||
for (size_t i = 0; i < num_obj; i++) {
|
||||
cur -= obj_size;
|
||||
this->GetImpl()->Free(cur);
|
||||
}
|
||||
}
|
||||
|
||||
size_t GetSlabHeapSize() const {
|
||||
return (this->end - this->start) / this->GetObjectSize();
|
||||
}
|
||||
|
||||
size_t GetObjectSize() const {
|
||||
return this->GetImpl()->GetObjectSize();
|
||||
}
|
||||
|
||||
void *AllocateImpl() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
void *obj = this->GetImpl()->Allocate();
|
||||
|
||||
/* TODO: under some debug define, track the peak for statistics, as N does? */
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
void FreeImpl(void *obj) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Don't allow freeing an object that wasn't allocated from this heap. */
|
||||
MESOSPHERE_ABORT_UNLESS(this->Contains(reinterpret_cast<uintptr_t>(obj)));
|
||||
|
||||
this->GetImpl()->Free(obj);
|
||||
}
|
||||
|
||||
size_t GetObjectIndexImpl(const void *obj) const {
|
||||
return (reinterpret_cast<uintptr_t>(obj) - this->start) / this->GetObjectSize();
|
||||
}
|
||||
|
||||
size_t GetPeakIndex() const {
|
||||
return this->GetObjectIndexImpl(reinterpret_cast<const void *>(this->peak));
|
||||
}
|
||||
|
||||
uintptr_t GetSlabHeapAddress() const {
|
||||
return this->start;
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
class KSlabHeap : public KSlabHeapBase {
|
||||
public:
|
||||
constexpr KSlabHeap() : KSlabHeapBase() { /* ... */ }
|
||||
|
||||
void Initialize(void *memory, size_t memory_size) {
|
||||
this->InitializeImpl(sizeof(T), memory, memory_size);
|
||||
}
|
||||
|
||||
T *Allocate() {
|
||||
T *obj = reinterpret_cast<T *>(this->AllocateImpl());
|
||||
if (AMS_LIKELY(obj != nullptr)) {
|
||||
new (obj) T();
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
void Free(T *obj) {
|
||||
this->FreeImpl(obj);
|
||||
}
|
||||
|
||||
size_t GetObjectIndex(const T *obj) const {
|
||||
return this->GetObjectIndexImpl(obj);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_scoped_lock.hpp>
|
||||
|
||||
#if defined(ATMOSPHERE_ARCH_ARM64)
|
||||
|
||||
#include <mesosphere/arch/arm64/kern_k_spin_lock.hpp>
|
||||
namespace ams::kern {
|
||||
using ams::kern::arch::arm64::KAlignedSpinLock;
|
||||
using ams::kern::arch::arm64::KNotAlignedSpinLock;
|
||||
using ams::kern::arch::arm64::KSpinLock;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#error "Unknown architecture for KInterruptManager"
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
using KScopedSpinLock = KScopedLock<KSpinLock>;
|
||||
using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>;
|
||||
using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>;
|
||||
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_linked_list.hpp>
|
||||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KSynchronization {
|
||||
private:
|
||||
friend class KSynchronizationObject;
|
||||
public:
|
||||
constexpr KSynchronization() { /* ... */ }
|
||||
|
||||
Result Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout);
|
||||
private:
|
||||
void OnAvailable(KSynchronizationObject *object);
|
||||
void OnAbort(KSynchronizationObject *object, Result abort_reason);
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_linked_list.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KThread;
|
||||
|
||||
class KSynchronizationObject : public KAutoObjectWithList {
|
||||
MESOSPHERE_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
|
||||
public:
|
||||
using ThreadList = KLinkedList<KThread>;
|
||||
using iterator = ThreadList::iterator;
|
||||
private:
|
||||
ThreadList thread_list;
|
||||
protected:
|
||||
constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), thread_list() { MESOSPHERE_ASSERT_THIS(); }
|
||||
virtual ~KSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
virtual void OnFinalizeSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
void NotifyAvailable();
|
||||
void NotifyAbort(Result abort_reason);
|
||||
public:
|
||||
virtual void Finalize() override;
|
||||
virtual bool IsSignaled() const = 0;
|
||||
virtual void DebugWaiters();
|
||||
|
||||
iterator AddWaiterThread(KThread *thread);
|
||||
iterator RemoveWaiterThread(iterator it);
|
||||
|
||||
iterator begin();
|
||||
iterator end();
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_system_control.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KTargetSystem {
|
||||
private:
|
||||
friend class KSystemControl;
|
||||
private:
|
||||
static inline bool s_is_debug_mode;
|
||||
static inline bool s_enable_debug_logging;
|
||||
static inline bool s_enable_user_exception_handlers;
|
||||
static inline bool s_enable_debug_memory_fill;
|
||||
static inline bool s_enable_user_pmu_access;
|
||||
static inline bool s_enable_kernel_debugging;
|
||||
private:
|
||||
static ALWAYS_INLINE void SetIsDebugMode(bool en) { s_is_debug_mode = en; }
|
||||
static ALWAYS_INLINE void EnableDebugLogging(bool en) { s_enable_debug_logging = en; }
|
||||
static ALWAYS_INLINE void EnableUserExceptionHandlers(bool en) { s_enable_user_exception_handlers = en; }
|
||||
static ALWAYS_INLINE void EnableDebugMemoryFill(bool en) { s_enable_debug_memory_fill = en; }
|
||||
static ALWAYS_INLINE void EnableUserPmuAccess(bool en) { s_enable_user_pmu_access = en; }
|
||||
static ALWAYS_INLINE void EnableKernelDebugging(bool en) { s_enable_kernel_debugging = en; }
|
||||
public:
|
||||
static ALWAYS_INLINE bool IsDebugMode() { return s_is_debug_mode; }
|
||||
static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_enable_debug_logging; }
|
||||
static ALWAYS_INLINE bool IsUserExceptionHandlersEnabled() { return s_enable_user_exception_handlers; }
|
||||
static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_enable_debug_memory_fill; }
|
||||
static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_enable_user_pmu_access; }
|
||||
static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_enable_kernel_debugging; }
|
||||
};
|
||||
|
||||
}
|
407
libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
Normal file
407
libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
Normal file
|
@ -0,0 +1,407 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_synchronization_object.hpp>
|
||||
#include <mesosphere/kern_k_affinity_mask.hpp>
|
||||
#include <mesosphere/kern_k_thread_context.hpp>
|
||||
#include <mesosphere/kern_k_current_context.hpp>
|
||||
#include <mesosphere/kern_k_timer_task.hpp>
|
||||
#include <mesosphere/kern_k_worker_task.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KThreadQueue;
|
||||
class KProcess;
|
||||
class KConditionVariable;
|
||||
class KAddressArbiter;
|
||||
|
||||
using KThreadFunction = void (*)(uintptr_t);
|
||||
|
||||
class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>, public KTimerTask, public KWorkerTask {
|
||||
MESOSPHERE_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
|
||||
private:
|
||||
friend class KProcess;
|
||||
friend class KConditionVariable;
|
||||
friend class KAddressArbiter;
|
||||
public:
|
||||
static constexpr s32 MainThreadPriority = 1;
|
||||
static constexpr s32 IdleThreadPriority = 64;
|
||||
|
||||
enum ThreadType : u32 {
|
||||
ThreadType_Main = 0,
|
||||
ThreadType_Kernel = 1,
|
||||
ThreadType_HighPriority = 2,
|
||||
ThreadType_User = 3,
|
||||
};
|
||||
|
||||
enum SuspendType : u32 {
|
||||
SuspendType_Process = 0,
|
||||
SuspendType_Thread = 1,
|
||||
SuspendType_Debug = 2,
|
||||
SuspendType_Unk3 = 3,
|
||||
SuspendType_Init = 4,
|
||||
|
||||
SuspendType_Count,
|
||||
};
|
||||
|
||||
enum ThreadState : u16 {
|
||||
ThreadState_Initialized = 0,
|
||||
ThreadState_Waiting = 1,
|
||||
ThreadState_Runnable = 2,
|
||||
ThreadState_Terminated = 3,
|
||||
|
||||
ThreadState_SuspendShift = 4,
|
||||
ThreadState_Mask = (1 << ThreadState_SuspendShift) - 1,
|
||||
|
||||
ThreadState_ProcessSuspended = (1 << (SuspendType_Process + ThreadState_SuspendShift)),
|
||||
ThreadState_ThreadSuspended = (1 << (SuspendType_Thread + ThreadState_SuspendShift)),
|
||||
ThreadState_DebugSuspended = (1 << (SuspendType_Debug + ThreadState_SuspendShift)),
|
||||
ThreadState_Unk3Suspended = (1 << (SuspendType_Unk3 + ThreadState_SuspendShift)),
|
||||
ThreadState_InitSuspended = (1 << (SuspendType_Init + ThreadState_SuspendShift)),
|
||||
|
||||
ThreadState_SuspendFlagMask = ((1 << SuspendType_Count) - 1) << ThreadState_SuspendShift,
|
||||
};
|
||||
|
||||
enum DpcFlag : u32 {
|
||||
DpcFlag_Terminating = (1 << 0),
|
||||
DpcFlag_Terminated = (1 << 1),
|
||||
};
|
||||
|
||||
struct StackParameters {
|
||||
alignas(0x10) u8 svc_permission[0x10];
|
||||
std::atomic<u8> dpc_flags;
|
||||
u8 current_svc_id;
|
||||
bool is_calling_svc;
|
||||
bool is_in_exception_handler;
|
||||
bool is_preemption_state_pinned;
|
||||
s32 disable_count;
|
||||
KThreadContext *context;
|
||||
};
|
||||
static_assert(alignof(StackParameters) == 0x10);
|
||||
|
||||
struct QueueEntry {
|
||||
private:
|
||||
KThread *prev;
|
||||
KThread *next;
|
||||
public:
|
||||
constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ }
|
||||
|
||||
constexpr void Initialize() {
|
||||
this->prev = nullptr;
|
||||
this->next = nullptr;
|
||||
}
|
||||
|
||||
constexpr KThread *GetPrev() const { return this->prev; }
|
||||
constexpr KThread *GetNext() const { return this->next; }
|
||||
constexpr void SetPrev(KThread *t) { this->prev = t; }
|
||||
constexpr void SetNext(KThread *t) { this->next = t; }
|
||||
};
|
||||
private:
|
||||
static constexpr size_t PriorityInheritanceCountMax = 10;
|
||||
union SyncObjectBuffer {
|
||||
KSynchronizationObject *sync_objects[ams::svc::MaxWaitSynchronizationHandleCount];
|
||||
ams::svc::Handle handles[ams::svc::MaxWaitSynchronizationHandleCount * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))];
|
||||
|
||||
constexpr SyncObjectBuffer() : sync_objects() { /* ... */ }
|
||||
};
|
||||
static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
|
||||
private:
|
||||
static inline std::atomic<u64> s_next_thread_id = 0;
|
||||
private:
|
||||
alignas(16) KThreadContext thread_context{};
|
||||
KAffinityMask affinity_mask{};
|
||||
u64 thread_id{};
|
||||
std::atomic<s64> cpu_time{};
|
||||
KSynchronizationObject *synced_object{};
|
||||
KLightLock *waiting_lock{};
|
||||
uintptr_t condvar_key{};
|
||||
uintptr_t entrypoint{};
|
||||
KProcessAddress arbiter_key{};
|
||||
KProcess *parent{};
|
||||
void *kernel_stack_top{};
|
||||
u32 *light_ipc_data{};
|
||||
KProcessAddress tls_address{};
|
||||
void *tls_heap_address{};
|
||||
KLightLock activity_pause_lock{};
|
||||
SyncObjectBuffer sync_object_buffer{};
|
||||
s64 schedule_count{};
|
||||
s64 last_scheduled_tick{};
|
||||
QueueEntry per_core_priority_queue_entry[cpu::NumCores]{};
|
||||
QueueEntry sleeping_queue_entry{};
|
||||
KThreadQueue *sleeping_queue{};
|
||||
util::IntrusiveListNode waiter_list_node{};
|
||||
util::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
|
||||
util::IntrusiveListNode process_list_node{};
|
||||
|
||||
using WaiterListTraits = util::IntrusiveListMemberTraitsDeferredAssert<&KThread::waiter_list_node>;
|
||||
using WaiterList = WaiterListTraits::ListType;
|
||||
|
||||
WaiterList waiter_list{};
|
||||
WaiterList paused_waiter_list{};
|
||||
KThread *lock_owner{};
|
||||
KConditionVariable *cond_var{};
|
||||
uintptr_t debug_params[3]{};
|
||||
u32 arbiter_value{};
|
||||
u32 suspend_request_flags{};
|
||||
u32 suspend_allowed_flags{};
|
||||
Result wait_result;
|
||||
Result debug_exception_result;
|
||||
s32 priority{};
|
||||
s32 core_id{};
|
||||
s32 base_priority{};
|
||||
s32 ideal_core_id{};
|
||||
s32 num_kernel_waiters{};
|
||||
KAffinityMask original_affinity_mask{};
|
||||
s32 original_ideal_core_id{};
|
||||
s32 num_core_migration_disables{};
|
||||
ThreadState thread_state{};
|
||||
std::atomic<bool> termination_requested{};
|
||||
bool ipc_cancelled{};
|
||||
bool wait_cancelled{};
|
||||
bool cancellable{};
|
||||
bool registered{};
|
||||
bool signaled{};
|
||||
bool initialized{};
|
||||
bool debug_attached{};
|
||||
s8 priority_inheritance_count{};
|
||||
bool resource_limit_release_hint{};
|
||||
public:
|
||||
constexpr KThread() : wait_result(svc::ResultNoSynchronizationObject()), debug_exception_result(ResultSuccess()) { /* ... */ }
|
||||
|
||||
virtual ~KThread() { /* ... */ }
|
||||
/* TODO: Is a constexpr KThread() possible? */
|
||||
|
||||
Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
|
||||
|
||||
private:
|
||||
static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
|
||||
public:
|
||||
static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 core) {
|
||||
return InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, core, nullptr, ThreadType_Kernel);
|
||||
}
|
||||
|
||||
static Result InitializeHighPriorityThread(KThread *thread, KThreadFunction func, uintptr_t arg) {
|
||||
return InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority);
|
||||
}
|
||||
|
||||
static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner) {
|
||||
return InitializeThread(thread, func, arg, user_stack_top, prio, core, owner, ThreadType_User);
|
||||
}
|
||||
|
||||
static void ResumeThreadsSuspendedForInit();
|
||||
private:
|
||||
StackParameters &GetStackParameters() {
|
||||
return *(reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1);
|
||||
}
|
||||
|
||||
const StackParameters &GetStackParameters() const {
|
||||
return *(reinterpret_cast<const StackParameters *>(this->kernel_stack_top) - 1);
|
||||
}
|
||||
public:
|
||||
ALWAYS_INLINE s32 GetDisableDispatchCount() const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
return this->GetStackParameters().disable_count;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void DisableDispatch() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0);
|
||||
this->GetStackParameters().disable_count++;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void EnableDispatch() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() > 0);
|
||||
this->GetStackParameters().disable_count--;
|
||||
}
|
||||
|
||||
NOINLINE void DisableCoreMigration();
|
||||
NOINLINE void EnableCoreMigration();
|
||||
|
||||
ALWAYS_INLINE void SetInExceptionHandler() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
this->GetStackParameters().is_in_exception_handler = true;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void ClearInExceptionHandler() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
this->GetStackParameters().is_in_exception_handler = false;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool IsInExceptionHandler() const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
return this->GetStackParameters().is_in_exception_handler;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void RegisterDpc(DpcFlag flag) {
|
||||
this->GetStackParameters().dpc_flags |= flag;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void ClearDpc(DpcFlag flag) {
|
||||
this->GetStackParameters().dpc_flags &= ~flag;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE u8 GetDpc() const {
|
||||
return this->GetStackParameters().dpc_flags;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE bool HasDpc() const {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
return this->GetDpc() != 0;;
|
||||
}
|
||||
private:
|
||||
void Suspend();
|
||||
ALWAYS_INLINE void AddWaiterImpl(KThread *thread);
|
||||
ALWAYS_INLINE void RemoveWaiterImpl(KThread *thread);
|
||||
ALWAYS_INLINE static void RestorePriority(KThread *thread);
|
||||
public:
|
||||
constexpr u64 GetThreadId() const { return this->thread_id; }
|
||||
|
||||
constexpr KThreadContext &GetContext() { return this->thread_context; }
|
||||
constexpr const KThreadContext &GetContext() const { return this->thread_context; }
|
||||
constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; }
|
||||
constexpr ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
|
||||
constexpr ThreadState GetRawState() const { return this->thread_state; }
|
||||
NOINLINE void SetState(ThreadState state);
|
||||
|
||||
NOINLINE KThreadContext *GetContextForSchedulerLoop();
|
||||
|
||||
constexpr uintptr_t GetConditionVariableKey() const { return this->condvar_key; }
|
||||
|
||||
constexpr s32 GetIdealCore() const { return this->ideal_core_id; }
|
||||
constexpr s32 GetActiveCore() const { return this->core_id; }
|
||||
constexpr void SetActiveCore(s32 core) { this->core_id = core; }
|
||||
constexpr s32 GetPriority() const { return this->priority; }
|
||||
constexpr void SetPriority(s32 prio) { this->priority = prio; }
|
||||
constexpr s32 GetBasePriority() const { return this->base_priority; }
|
||||
|
||||
constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; }
|
||||
constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; }
|
||||
|
||||
constexpr QueueEntry &GetSleepingQueueEntry() { return this->sleeping_queue_entry; }
|
||||
constexpr const QueueEntry &GetSleepingQueueEntry() const { return this->sleeping_queue_entry; }
|
||||
constexpr void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; }
|
||||
|
||||
constexpr KConditionVariable *GetConditionVariable() const { return this->cond_var; }
|
||||
|
||||
constexpr s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; }
|
||||
|
||||
void AddWaiter(KThread *thread);
|
||||
void RemoveWaiter(KThread *thread);
|
||||
KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key);
|
||||
|
||||
constexpr KProcessAddress GetAddressKey() const { return this->arbiter_key; }
|
||||
constexpr void SetAddressKey(KProcessAddress key) { this->arbiter_key = key; }
|
||||
constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; }
|
||||
constexpr KThread *GetLockOwner() const { return this->lock_owner; }
|
||||
|
||||
constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) {
|
||||
this->synced_object = obj;
|
||||
this->wait_result = wait_res;
|
||||
}
|
||||
|
||||
bool HasWaiters() const { return !this->waiter_list.empty(); }
|
||||
|
||||
constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; }
|
||||
constexpr void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; }
|
||||
|
||||
constexpr KProcess *GetOwnerProcess() const { return this->parent; }
|
||||
constexpr bool IsUserThread() const { return this->parent != nullptr; }
|
||||
|
||||
constexpr KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; }
|
||||
constexpr void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; }
|
||||
|
||||
constexpr u16 GetUserPreemptionState() const { return *GetPointer<u16>(this->tls_address + 0x100); }
|
||||
constexpr void SetKernelPreemptionState(u16 state) const { *GetPointer<u16>(this->tls_address + 0x100 + sizeof(u16)) = state; }
|
||||
|
||||
void AddCpuTime(s64 amount) {
|
||||
this->cpu_time += amount;
|
||||
}
|
||||
|
||||
constexpr u32 GetSuspendFlags() const { return this->suspend_allowed_flags & this->suspend_request_flags; }
|
||||
constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; }
|
||||
void RequestSuspend(SuspendType type);
|
||||
void Resume(SuspendType type);
|
||||
void TrySuspend();
|
||||
void Continue();
|
||||
|
||||
void ContinueIfHasKernelWaiters() {
|
||||
if (this->GetNumKernelWaiters() > 0) {
|
||||
this->Continue();
|
||||
}
|
||||
}
|
||||
|
||||
void Wakeup();
|
||||
|
||||
Result SetPriorityToIdle();
|
||||
|
||||
Result Run();
|
||||
void Exit();
|
||||
|
||||
ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1; }
|
||||
ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; }
|
||||
|
||||
/* TODO: This is kind of a placeholder definition. */
|
||||
|
||||
ALWAYS_INLINE bool IsTerminationRequested() const {
|
||||
return this->termination_requested || this->GetRawState() == ThreadState_Terminated;
|
||||
}
|
||||
|
||||
public:
|
||||
/* Overridden parent functions. */
|
||||
virtual u64 GetId() const override { return this->GetThreadId(); }
|
||||
|
||||
virtual bool IsInitialized() const override { return this->initialized; }
|
||||
virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->parent) | (this->resource_limit_release_hint ? 1 : 0); }
|
||||
|
||||
static void PostDestroy(uintptr_t arg);
|
||||
|
||||
virtual void Finalize() override;
|
||||
virtual bool IsSignaled() const override;
|
||||
virtual void OnTimer() override;
|
||||
virtual void DoWorkerTask() override;
|
||||
public:
|
||||
static constexpr bool IsWaiterListValid() {
|
||||
return WaiterListTraits::IsValid();
|
||||
}
|
||||
};
|
||||
static_assert(alignof(KThread) == 0x10);
|
||||
static_assert(KThread::IsWaiterListValid());
|
||||
|
||||
class KScopedDisableDispatch {
|
||||
public:
|
||||
explicit ALWAYS_INLINE KScopedDisableDispatch() {
|
||||
GetCurrentThread().DisableDispatch();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE ~KScopedDisableDispatch() {
|
||||
GetCurrentThread().EnableDispatch();
|
||||
}
|
||||
};
|
||||
|
||||
class KScopedEnableDispatch {
|
||||
public:
|
||||
explicit ALWAYS_INLINE KScopedEnableDispatch() {
|
||||
GetCurrentThread().EnableDispatch();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE ~KScopedEnableDispatch() {
|
||||
GetCurrentThread().DisableDispatch();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#ifdef ATMOSPHERE_ARCH_ARM64
|
||||
#include <mesosphere/arch/arm64/kern_k_thread_context.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
using ams::kern::arch::arm64::KThreadContext;
|
||||
}
|
||||
#else
|
||||
#error "Unknown architecture for KThreadContext"
|
||||
#endif
|
|
@ -0,0 +1,108 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_select_cpu.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
#include <mesosphere/kern_k_page_buffer.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KThread;
|
||||
class KProcess;
|
||||
|
||||
/* Tracks a single page of memory subdivided into thread-local regions (TLRs). */
/* Lives in an intrusive red-black tree keyed by page address, and is slab-allocated. */
class KThreadLocalPage : public util::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>, public KSlabAllocated<KThreadLocalPage> {
    public:
        /* Number of thread-local regions that fit in one page. */
        static constexpr size_t RegionsPerPage = PageSize / ams::svc::ThreadLocalRegionSize;
        static_assert(RegionsPerPage > 0);
    private:
        KProcessAddress virt_addr;           /* Process virtual address of the backing page. */
        KProcess *owner;                     /* Owning process; nullptr until initialized. */
        bool is_region_free[RegionsPerPage]; /* Per-region free flags; true = available. */
    public:
        /* Construct a tracker for the page at addr; all regions start free. */
        constexpr explicit KThreadLocalPage(KProcessAddress addr) : virt_addr(addr), owner(nullptr), is_region_free() {
            for (size_t i = 0; i < RegionsPerPage; i++) {
                this->is_region_free[i] = true;
            }
        }

        constexpr explicit KThreadLocalPage() : KThreadLocalPage(Null<KProcessAddress>) { /* ... */ }

        constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return this->virt_addr; }

        /* Ordering predicate for the intrusive red-black tree: compares by page address. */
        static constexpr ALWAYS_INLINE int Compare(const KThreadLocalPage &lhs, const KThreadLocalPage &rhs) {
            const KProcessAddress lval = lhs.GetAddress();
            const KProcessAddress rval = rhs.GetAddress();

            if (lval < rval) {
                return -1;
            } else if (lval == rval) {
                return 0;
            } else {
                return 1;
            }
        }
    private:
        /* Address of the i-th thread-local region within this page. */
        /* NOTE: const-qualified; these accessors never mutate state. */
        constexpr ALWAYS_INLINE KProcessAddress GetRegionAddress(size_t i) const {
            return this->GetAddress() + i * ams::svc::ThreadLocalRegionSize;
        }

        /* Whether addr falls inside this page. */
        constexpr ALWAYS_INLINE bool Contains(KProcessAddress addr) const {
            return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
        }

        /* Index of the region containing addr; addr must be region-aligned and within this page. */
        constexpr ALWAYS_INLINE size_t GetRegionIndex(KProcessAddress addr) const {
            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), ams::svc::ThreadLocalRegionSize));
            MESOSPHERE_ASSERT(this->Contains(addr));
            return (addr - this->GetAddress()) / ams::svc::ThreadLocalRegionSize;
        }
    public:
        /* Binds this page to a process; implementation is out-of-line. */
        Result Initialize(KProcess *process);
        Result Finalize();

        /* Reserve a free region (returns its address) / release a previously reserved one. */
        KProcessAddress Reserve();
        void Release(KProcessAddress addr);

        /* Kernel-side pointer to the page's contents. */
        void *GetPointer() const;

        /* True iff no region is free. */
        bool IsAllUsed() const {
            for (size_t i = 0; i < RegionsPerPage; i++) {
                if (this->is_region_free[i]) {
                    return false;
                }
            }
            return true;
        }

        /* True iff every region is free. */
        bool IsAllFree() const {
            for (size_t i = 0; i < RegionsPerPage; i++) {
                if (!this->is_region_free[i]) {
                    return false;
                }
            }
            return true;
        }

        bool IsAnyUsed() const {
            return !this->IsAllFree();
        }

        bool IsAnyFree() const {
            return !this->IsAllUsed();
        }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Intrusive FIFO of sleeping threads, linked through each thread's QueueEntry. */
/* The member 'root' acts as a sentinel: when a neighbor is absent (nullptr), */
/* root's entry stands in for it, so root.GetNext() is the head and */
/* root.GetPrev() is the tail. Empty queue <=> root.GetNext() == nullptr. */
class KThreadQueue {
    private:
        using Entry = KThread::QueueEntry;
    private:
        Entry root;
    public:
        constexpr ALWAYS_INLINE KThreadQueue() : root() { /* ... */ }

        constexpr ALWAYS_INLINE bool IsEmpty() const { return this->root.GetNext() == nullptr; }

        /* Head of the queue, or nullptr when empty. */
        constexpr ALWAYS_INLINE KThread *GetFront() const { return this->root.GetNext(); }
        /* Successor of t within this queue (nullptr at the tail). */
        constexpr ALWAYS_INLINE KThread *GetNext(KThread *t) const { return t->GetSleepingQueueEntry().GetNext(); }
    private:
        /* Tail of the queue, or nullptr when empty. */
        constexpr ALWAYS_INLINE KThread *GetBack() const { return this->root.GetPrev(); }

        /* Append a thread at the tail. NOTE: statement order matters because */
        /* tail_entry aliases root when the queue is empty. */
        constexpr ALWAYS_INLINE void Enqueue(KThread *add) {
            /* Get the entry associated with the added thread. */
            Entry &add_entry = add->GetSleepingQueueEntry();

            /* Get the entry associated with the end of the queue. */
            KThread *tail = this->GetBack();
            Entry &tail_entry = (tail != nullptr) ? tail->GetSleepingQueueEntry() : this->root;

            /* Link the entries. */
            add_entry.SetPrev(tail);
            add_entry.SetNext(nullptr);
            tail_entry.SetNext(add);
            this->root.SetPrev(add);
        }

        /* Unlink a thread from an arbitrary position. prev_entry/next_entry */
        /* alias root when remove is at the head/tail respectively. */
        constexpr ALWAYS_INLINE void Remove(KThread *remove) {
            /* Get the entry associated with the thread. */
            Entry &remove_entry = remove->GetSleepingQueueEntry();

            /* Get the entries associated with next and prev. */
            KThread *prev = remove_entry.GetPrev();
            KThread *next = remove_entry.GetNext();
            Entry &prev_entry = (prev != nullptr) ? prev->GetSleepingQueueEntry() : this->root;
            Entry &next_entry = (next != nullptr) ? next->GetSleepingQueueEntry() : this->root;

            /* Unlink. */
            prev_entry.SetNext(next);
            next_entry.SetPrev(prev);
        }
    public:
        /* Pop the head thread (no-op on an empty queue) and clear its sleeping queue. */
        /* Does NOT change the popped thread's state; callers handle that. */
        constexpr ALWAYS_INLINE void Dequeue() {
            /* Get the front of the queue. */
            KThread *head = this->GetFront();
            if (head == nullptr) {
                return;
            }

            MESOSPHERE_ASSERT(head->GetState() == KThread::ThreadState_Waiting);

            /* Get the entry for the next head. */
            KThread *next = GetNext(head);
            Entry &next_entry = (next != nullptr) ? next->GetSleepingQueueEntry() : this->root;

            /* Link the entries. */
            this->root.SetNext(next);
            next_entry.SetPrev(nullptr);

            /* Clear the head's queue. */
            head->SetSleepingQueue(nullptr);
        }

        /* Put t to sleep on this queue. Returns false (after undoing the sleep) */
        /* if the thread has a pending termination request. */
        bool SleepThread(KThread *t) {
            /* Set the thread's queue and mark it as waiting. */
            t->SetSleepingQueue(this);
            t->SetState(KThread::ThreadState_Waiting);

            /* Add the thread to the queue. */
            this->Enqueue(t);

            /* If the thread needs terminating, undo our work. */
            if (t->IsTerminationRequested()) {
                this->WakeupThread(t);
                return false;
            }

            return true;
        }

        /* Remove a specific waiting thread and make it runnable again. */
        void WakeupThread(KThread *t) {
            MESOSPHERE_ASSERT(t->GetState() == KThread::ThreadState_Waiting);

            /* Remove the thread from the queue. */
            this->Remove(t);

            /* Mark the thread as no longer sleeping. */
            t->SetState(KThread::ThreadState_Runnable);
            t->SetSleepingQueue(nullptr);
        }

        /* Wake the head thread, if any, and return it (nullptr when empty). */
        KThread *WakeupFrontThread() {
            KThread *front = this->GetFront();
            if (front != nullptr) {
                MESOSPHERE_ASSERT(front->GetState() == KThread::ThreadState_Waiting);

                /* Remove the thread from the queue. */
                this->Dequeue();

                /* Mark the thread as no longer sleeping. */
                front->SetState(KThread::ThreadState_Runnable);
                front->SetSleepingQueue(nullptr);
            }
            return front;
        }
};
|
||||
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Base class for objects scheduled on the hardware timer; lives in an */
/* intrusive red-black tree ordered by scheduled time. */
class KTimerTask : public util::IntrusiveRedBlackTreeBaseNode<KTimerTask> {
    private:
        s64 time; /* Absolute time at which the task should fire. */
    public:
        /* Tree ordering by time. Deliberately never returns 0, so tasks with */
        /* equal times remain distinct entries (multiset-like insertion). */
        static constexpr ALWAYS_INLINE int Compare(const KTimerTask &lhs, const KTimerTask &rhs) {
            return (lhs.GetTime() < rhs.GetTime()) ? -1 : 1;
        }
    public:
        constexpr ALWAYS_INLINE KTimerTask() : time(0) { /* ... */ }

        constexpr ALWAYS_INLINE void SetTime(s64 t) { this->time = t; }

        constexpr ALWAYS_INLINE s64 GetTime() const { return this->time; }

        /* Invoked when the scheduled time is reached. */
        virtual void OnTimer() = 0;
};
|
||||
|
||||
}
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_auto_object.hpp>
|
||||
#include <mesosphere/kern_slab_helpers.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Kernel transfer-memory object. Currently only declared so that the auto-object */
/* machinery (slab heap + container) has a concrete type; members are TODO. */
class KTransferMemory final : public KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList> {
    MESOSPHERE_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
    public:
        /* TODO: This is a placeholder definition. */
};
|
||||
|
||||
}
|
|
@ -14,7 +14,7 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <vapours.hpp>
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
|
@ -50,6 +50,10 @@ namespace ams::kern {
|
|||
return this->address - rhs;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE ptrdiff_t operator-(KTypedAddress rhs) const {
|
||||
return this->address - rhs.address;
|
||||
}
|
||||
|
||||
template<typename I>
|
||||
constexpr ALWAYS_INLINE KTypedAddress operator+=(I rhs) {
|
||||
static_assert(std::is_integral<I>::value);
|
||||
|
@ -81,6 +85,11 @@ namespace ams::kern {
|
|||
return this->address >> shift;
|
||||
}
|
||||
|
||||
template<typename U>
|
||||
constexpr ALWAYS_INLINE size_t operator/(U size) const { return this->address / size; }
|
||||
|
||||
/* constexpr ALWAYS_INLINE uintptr_t operator%(U align) const { return this->address % align; } */
|
||||
|
||||
/* Comparison operators. */
|
||||
constexpr ALWAYS_INLINE bool operator==(KTypedAddress rhs) const {
|
||||
return this->address == rhs.address;
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_timer_task.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Timer task that also maintains an intrusive queue of waiting threads */
/* (linked through KThread::QueueEntry). OnTimer is implemented out-of-line. */
class KWaitObject : public KTimerTask {
    private:
        using Entry = KThread::QueueEntry;
    private:
        Entry root;      /* Sentinel entry for the intrusive wait list. */
        bool uses_timer; /* Whether this object is registered with the hardware timer. */
    public:
        constexpr KWaitObject() : root(), uses_timer() { /* ... */ }

        virtual void OnTimer() override;

        /* TODO: Member functions */
};
|
||||
|
||||
}
|
|
@ -0,0 +1,33 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
class KWorkerTask {
|
||||
private:
|
||||
KWorkerTask *next_task;
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KWorkerTask() : next_task(nullptr) { /* ... */ }
|
||||
|
||||
constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return this->next_task; }
|
||||
constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { this->next_task = task; }
|
||||
|
||||
virtual void DoWorkerTask() = 0;
|
||||
};
|
||||
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#pragma once
|
||||
#include <mesosphere/kern_common.hpp>
|
||||
#include <mesosphere/kern_k_worker_task.hpp>
|
||||
#include <mesosphere/kern_k_thread.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Owns a queue of KWorkerTask objects serviced by a dedicated kernel thread. */
/* One manager exists per WorkerType; implementations are out-of-line. */
class KWorkerTaskManager {
    public:
        /* Priority for the exit worker's thread. */
        static constexpr s32 ExitWorkerPriority = 11;

        enum WorkerType {
            WorkerType_Exit,

            WorkerType_Count,
        };
    private:
        KWorkerTask *head_task; /* Front of the intrusive task queue. */
        KWorkerTask *tail_task; /* Back of the intrusive task queue. */
        KThread *thread;        /* Thread that services queued tasks. */
        WorkerType type;        /* Which worker this manager is (WorkerType_Count until Initialize). */
        bool active;            /* Presumably set while the worker is processing; confirm in .cpp. */
    private:
        /* Thread entry point; arg presumably carries the manager pointer — confirm in .cpp. */
        static void ThreadFunction(uintptr_t arg);
        void ThreadFunctionImpl();

        /* Pop/push on the intrusive queue. */
        KWorkerTask *GetTask();
        void AddTask(KWorkerTask *task);
    public:
        constexpr KWorkerTaskManager() : head_task(), tail_task(), thread(), type(WorkerType_Count), active() { /* ... */ }

        /* Creates and starts the worker thread for the given type/priority. */
        NOINLINE void Initialize(WorkerType wt, s32 priority);
        /* Queue a task on the manager associated with the given worker type. */
        static void AddTask(WorkerType type, KWorkerTask *task);
};
|
||||
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue