mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22 20:31:14 +00:00)

Add mesosphere (VERY VERY WIP)

This commit is contained in:
parent 50e307b4b7
commit 745fa84e5e

56 changed files with 5033 additions and 0 deletions
152 mesosphere/Makefile Normal file
@@ -0,0 +1,152 @@
#---------------------------------------------------------------------------------
.SUFFIXES:
#---------------------------------------------------------------------------------

ifeq ($(MESOSPHERE_BOARD),)
export MESOSPHERE_BOARD := nintendo-switch
endif

#---------------------------------------------------------------------------------
# TARGET is the name of the output
# BUILD is the directory where object files & intermediate files will be placed
# SOURCES is a list of directories containing source code
# DATA is a list of directories containing data files
# INCLUDES is a list of directories containing header files
#---------------------------------------------------------------------------------
TARGET := $(notdir $(CURDIR))
BUILD := build

ifneq ($(BUILD),$(notdir $(CURDIR)))
export CONFIG_DIR := $(CURDIR)/config
ifeq ($(MESOSPHERE_BOARD),nintendo-switch)
export BOARD_MAKE_DIR := $(CURDIR)/config/board/nintendo/switch
export ARCH_MAKE_DIR := $(CURDIR)/config/arch/arm64
endif
endif

include $(CONFIG_DIR)/rules.mk
include $(CONFIG_DIR)/common.mk
include $(ARCH_MAKE_DIR)/arch.mk
include $(BOARD_MAKE_DIR)/board.mk

SOURCES := $(COMMON_SOURCES_DIRS) $(ARCH_SOURCE_DIRS) $(BOARD_SOURCE_DIRS)
DATA := data
INCLUDES := include ../common/include

DEFINES := $(COMMON_DEFINES) $(ARCH_DEFINES) $(BOARD_DEFINES)

#---------------------------------------------------------------------------------
# options for code generation
#---------------------------------------------------------------------------------
SETTING := $(COMMON_SETTING) $(ARCH_SETTING) $(BOARD_SETTING)

CFLAGS := $(SETTING) $(DEFINES) $(COMMON_CFLAGS) $(ARCH_CFLAGS) $(BOARD_CFLAGS)
CFLAGS += $(INCLUDE)

CXXFLAGS := $(CFLAGS) $(COMMON_CXXFLAGS) $(ARCH_CXXFLAGS) $(BOARD_CXXFLAGS)

ASFLAGS := -g $(SETTING)
LDFLAGS = -specs=$(ARCH_MAKE_DIR)/linker.specs $(SETTING) $(COMMON_LDFLAGS)

LIBS :=

#---------------------------------------------------------------------------------
# list of directories containing libraries, this must be the top level containing
# include and lib
#---------------------------------------------------------------------------------
LIBDIRS :=

#---------------------------------------------------------------------------------
# no real need to edit anything past this point unless you need to add additional
# rules for different file extensions
#---------------------------------------------------------------------------------
ifneq ($(BUILD),$(notdir $(CURDIR)))
#---------------------------------------------------------------------------------

export OUTPUT := $(CURDIR)/$(TARGET)
export TOPDIR := $(CURDIR)

export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \
                $(foreach dir,$(DATA),$(CURDIR)/$(dir))

export DEPSDIR := $(CURDIR)/$(BUILD)

CFILES   := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.c)))
CPPFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.cpp)))
SFILES   := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.s)))
BINFILES := $(foreach dir,$(DATA),$(notdir $(wildcard $(dir)/*.*)))

#---------------------------------------------------------------------------------
# use CXX for linking C++ projects, CC for standard C
#---------------------------------------------------------------------------------
ifeq ($(strip $(CPPFILES)),)
#---------------------------------------------------------------------------------
export LD := $(CC)
#---------------------------------------------------------------------------------
else
#---------------------------------------------------------------------------------
export LD := $(CXX)
#---------------------------------------------------------------------------------
endif
#---------------------------------------------------------------------------------

export OFILES := $(addsuffix .o,$(BINFILES)) \
                 $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o)

export INCLUDE := $(foreach dir,$(INCLUDES),-I$(CURDIR)/$(dir)) \
                  $(foreach dir,$(LIBDIRS),-I$(dir)/include) \
                  -I$(CURDIR)/$(BUILD)

export LIBPATHS := $(foreach dir,$(LIBDIRS),-L$(dir)/lib)

.PHONY: $(BUILD) clean all

#---------------------------------------------------------------------------------
all: $(BUILD)

$(BUILD):
	@[ -d $@ ] || mkdir -p $@
	@$(MAKE) --no-print-directory -C $(BUILD) -f $(CURDIR)/Makefile

#---------------------------------------------------------------------------------
clean:
	@echo clean ...
	@rm -fr $(BUILD) $(TARGET).bin $(TARGET).elf

#---------------------------------------------------------------------------------
else
.PHONY: all

DEPENDS := $(OFILES:.o=.d)

#---------------------------------------------------------------------------------
# main targets
#---------------------------------------------------------------------------------
all : $(OUTPUT).bin

$(OUTPUT).bin : $(OUTPUT).elf
	$(OBJCOPY) -S -O binary $< $@
	@echo built ... $(notdir $@)

$(OUTPUT).elf : $(OFILES)

%.elf:
	@echo linking $(notdir $@)
	$(LD) $(LDFLAGS) $(OFILES) $(LIBPATHS) $(LIBS) -o $@
	@$(NM) -CSn $@ > $(notdir $*.lst)

#---------------------------------------------------------------------------------
# you need a rule like this for each extension you use as binary data
#---------------------------------------------------------------------------------
%.bin.o : %.bin
#---------------------------------------------------------------------------------
	@echo $(notdir $<)
	@$(bin2o)

-include $(DEPENDS)

#---------------------------------------------------------------------------------------
endif
#---------------------------------------------------------------------------------------
9 mesosphere/README.md Normal file
@@ -0,0 +1,9 @@
# Mesosphère

**WORK IN PROGRESS**

Special thanks to:

* @gdkchan ([Ryujinx](https://github.com/Ryujinx/Ryujinx)'s author), without whom I would have been unable to understand many complex mechanisms of the Horizon kernel, such as the scheduler. Ryujinx's kernel HLE is quite accurate, and part of this work naturally bears strong similarities to Ryujinx's kernel code.
* @fincs, who also helped me a lot in the kernel reverse-engineering process, and with countless other things.
25 mesosphere/config/arch/arm64/arch.mk Normal file
@@ -0,0 +1,25 @@
ifeq ($(strip $(DEVKITPRO)),)

PREFIX := aarch64-none-elf-

export CC := $(PREFIX)gcc
export CXX := $(PREFIX)g++
export AS := $(PREFIX)as
export AR := $(PREFIX)gcc-ar
export OBJCOPY := $(PREFIX)objcopy

ISVC=$(or $(VCBUILDHELPER_COMMAND),$(MSBUILDEXTENSIONSPATH32),$(MSBUILDEXTENSIONSPATH))

ifneq (,$(ISVC))
ERROR_FILTER := 2>&1 | sed -e 's/\(.[a-zA-Z]\+\):\([0-9]\+\):/\1(\2):/g'
endif

else
include $(DEVKITPRO)/devkitA64/base_tools
endif

ARCH_SETTING := -march=armv8-a -mgeneral-regs-only
ARCH_DEFINES := -DMESOSPHERE_ARCH_ARM64
ARCH_CFLAGS :=
ARCH_CXXFLAGS :=
ARCH_SOURCE_DIRS := source/arch/arm64
214 mesosphere/config/arch/arm64/linker.ld Normal file
@@ -0,0 +1,214 @@
OUTPUT_ARCH(aarch64)
ENTRY(_start)

/* TODO overhaul */

PHDRS
{
    code PT_LOAD FLAGS(5) /* Read | Execute */;
    rodata PT_LOAD FLAGS(4) /* Read */;
    data PT_LOAD FLAGS(6) /* Read | Write */;
    dyn PT_DYNAMIC;
}

SECTIONS
{
    /* =========== CODE section =========== */
    PROVIDE(__start__ = 0x0);
    . = __start__;

    .crt0 :
    {
        KEEP (*(.crt0))
        . = ALIGN(8);
    } :code

    .init :
    {
        KEEP( *(.init) )
        . = ALIGN(8);
    } :code

    .plt :
    {
        *(.plt)
        *(.iplt)
        . = ALIGN(8);
    } :code

    .text :
    {
        *(.text.unlikely .text.*_unlikely .text.unlikely.*)
        *(.text.exit .text.exit.*)
        *(.text.startup .text.startup.*)
        *(.text.hot .text.hot.*)
        *(.text .stub .text.* .gnu.linkonce.t.*)
        . = ALIGN(8);
    } :code

    .fini :
    {
        KEEP( *(.fini) )
        . = ALIGN(8);
    } :code

    /* =========== RODATA section =========== */
    . = ALIGN(0x1000);

    .rodata :
    {
        *(.rodata .rodata.* .gnu.linkonce.r.*)
        . = ALIGN(8);
    } :rodata

    .eh_frame_hdr : { __eh_frame_hdr_start = .; *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) __eh_frame_hdr_end = .; } :rodata
    .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) } :rodata
    .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } :rodata
    .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) } :rodata

    .dynamic : { *(.dynamic) } :rodata :dyn
    .dynsym : { *(.dynsym) } :rodata
    .dynstr : { *(.dynstr) } :rodata
    .rela.dyn : { *(.rela.*) } :rodata
    .interp : { *(.interp) } :rodata
    .hash : { *(.hash) } :rodata
    .gnu.hash : { *(.gnu.hash) } :rodata
    .gnu.version : { *(.gnu.version) } :rodata
    .gnu.version_d : { *(.gnu.version_d) } :rodata
    .gnu.version_r : { *(.gnu.version_r) } :rodata
    .note.gnu.build-id : { *(.note.gnu.build-id) } :rodata

    /* =========== DATA section =========== */
    . = ALIGN(0x1000);

    .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) } :data
    .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } :data
    .gnu_extab : ONLY_IF_RW { *(.gnu_extab*) } :data
    .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } :data

    .tdata ALIGN(8) :
    {
        __tdata_lma = .;
        *(.tdata .tdata.* .gnu.linkonce.td.*)
        . = ALIGN(8);
        __tdata_lma_end = .;
    } :data

    .tbss ALIGN(8) :
    {
        *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
        . = ALIGN(8);
    } :data

    .preinit_array ALIGN(8) :
    {
        PROVIDE (__preinit_array_start = .);
        KEEP (*(.preinit_array))
        PROVIDE (__preinit_array_end = .);
    } :data

    .init_array ALIGN(8) :
    {
        PROVIDE (__init_array_start = .);
        KEEP (*(SORT(.init_array.*)))
        KEEP (*(.init_array))
        PROVIDE (__init_array_end = .);
    } :data

    .fini_array ALIGN(8) :
    {
        PROVIDE (__fini_array_start = .);
        KEEP (*(.fini_array))
        KEEP (*(SORT(.fini_array.*)))
        PROVIDE (__fini_array_end = .);
    } :data

    .ctors ALIGN(8) :
    {
        KEEP (*crtbegin.o(.ctors)) /* MUST be first -- GCC requires it */
        KEEP (*(EXCLUDE_FILE (*crtend.o) .ctors))
        KEEP (*(SORT(.ctors.*)))
        KEEP (*(.ctors))
    } :data

    .dtors ALIGN(8) :
    {
        KEEP (*crtbegin.o(.dtors))
        KEEP (*(EXCLUDE_FILE (*crtend.o) .dtors))
        KEEP (*(SORT(.dtors.*)))
        KEEP (*(.dtors))
    } :data

    __got_start__ = .;

    .got : { *(.got) *(.igot) } :data
    .got.plt : { *(.got.plt) *(.igot.plt) } :data

    __got_end__ = .;

    .data ALIGN(8) :
    {
        *(.data .data.* .gnu.linkonce.d.*)
        SORT(CONSTRUCTORS)
    } :data

    __bss_start__ = .;
    .bss ALIGN(8) :
    {
        *(.dynbss)
        *(.bss .bss.* .gnu.linkonce.b.*)
        *(COMMON)
        . = ALIGN(8);

        /* Reserve space for the TLS segment of the main thread */
        __tls_start = .;
        . += SIZEOF(.tdata) + SIZEOF(.tbss);
        __tls_end = .;
    } :data
    __bss_end__ = .;

    __end__ = ABSOLUTE(.) ;

    . = ALIGN(0x1000);
    __argdata__ = ABSOLUTE(.) ;

    /* ==================
       ==== Metadata ====
       ================== */

    /* Discard sections that make post-processing difficult */
    /DISCARD/ : { *(.group .comment .note) }

    /* Stabs debugging sections. */
    .stab 0 : { *(.stab) }
    .stabstr 0 : { *(.stabstr) }
    .stab.excl 0 : { *(.stab.excl) }
    .stab.exclstr 0 : { *(.stab.exclstr) }
    .stab.index 0 : { *(.stab.index) }
    .stab.indexstr 0 : { *(.stab.indexstr) }

    /* DWARF debug sections.
       Symbols in the DWARF debugging sections are relative to the beginning
       of the section so we begin them at 0. */

    /* DWARF 1 */
    .debug 0 : { *(.debug) }
    .line 0 : { *(.line) }

    /* GNU DWARF 1 extensions */
    .debug_srcinfo 0 : { *(.debug_srcinfo) }
    .debug_sfnames 0 : { *(.debug_sfnames) }

    /* DWARF 1.1 and DWARF 2 */
    .debug_aranges 0 : { *(.debug_aranges) }
    .debug_pubnames 0 : { *(.debug_pubnames) }

    /* DWARF 2 */
    .debug_info 0 : { *(.debug_info) }
    .debug_abbrev 0 : { *(.debug_abbrev) }
    .debug_line 0 : { *(.debug_line) }
    .debug_frame 0 : { *(.debug_frame) }
    .debug_str 0 : { *(.debug_str) }
    .debug_loc 0 : { *(.debug_loc) }
    .debug_macinfo 0 : { *(.debug_macinfo) }
}
4 mesosphere/config/arch/arm64/linker.specs Normal file
@@ -0,0 +1,4 @@
%rename link old_link

*link:
%(old_link) -T %:getenv(ARCH_MAKE_DIR /linker.ld) -pie --gc-sections -z text -z nodynamic-undefined-weak --build-id=sha1
5 mesosphere/config/board/nintendo/switch/board.mk Normal file
@@ -0,0 +1,5 @@
BOARD_SETTING := -mtune=cortex-a57
BOARD_DEFINES := -DMESOSPHERE_BOARD_NINTENDO_SWITCH -DMESOSPHERE_BOARD_COMMON_ARM_ARM64_CLOCK
BOARD_CFLAGS :=
BOARD_CXXFLAGS :=
BOARD_SOURCE_DIRS :=
9 mesosphere/config/common.mk Normal file
@@ -0,0 +1,9 @@
COMMON_DEFINES := -DBOOST_DISABLE_ASSERTS
COMMON_SOURCES_DIRS := source/core source/interfaces source/interrupts source/kresources \
                       source/processes source/threading source
COMMON_SETTING := -fPIE -g -nostdlib
COMMON_CFLAGS := -Wall -Werror -O2 -ffunction-sections -fdata-sections -fno-strict-aliasing -fwrapv \
                 -fno-asynchronous-unwind-tables -fno-unwind-tables -fno-stack-protector
COMMON_CXXFLAGS := -fno-rtti -fno-exceptions -std=gnu++17
COMMON_ASFLAGS :=
COMMON_LDFLAGS := -Wl,-Map,out.map
37 mesosphere/config/rules.mk Normal file
@@ -0,0 +1,37 @@
#---------------------------------------------------------------------------------
%.a:
#---------------------------------------------------------------------------------
	@echo $(notdir $@)
	@rm -f $@
	$(AR) -rc $@ $^


#---------------------------------------------------------------------------------
%.o: %.cpp
	@echo $(notdir $<)
	$(CXX) -MMD -MP -MF $(DEPSDIR)/$*.d $(CXXFLAGS) -c $< -o $@ $(ERROR_FILTER)

#---------------------------------------------------------------------------------
%.o: %.c
	@echo $(notdir $<)
	$(CC) -MMD -MP -MF $(DEPSDIR)/$*.d $(CFLAGS) -c $< -o $@ $(ERROR_FILTER)

#---------------------------------------------------------------------------------
%.o: %.s
	@echo $(notdir $<)
	$(CC) -MMD -MP -MF $(DEPSDIR)/$*.d -x assembler-with-cpp $(ASFLAGS) -c $< -o $@ $(ERROR_FILTER)

#---------------------------------------------------------------------------------
%.o: %.S
	@echo $(notdir $<)
	$(CC) -MMD -MP -MF $(DEPSDIR)/$*.d -x assembler-with-cpp $(ASFLAGS) -c $< -o $@ $(ERROR_FILTER)

#---------------------------------------------------------------------------------
# canned command sequence for binary data
#---------------------------------------------------------------------------------
define bin2o
	bin2s $< | $(AS) -o $(@)
	echo "extern const u8" `(echo $(<F) | sed -e 's/^\([0-9]\)/_\1/' -e 's/[^A-Za-z0-9_]/_/g')`"_end[];" > `(echo $(<F) | tr . _)`.h
	echo "extern const u8" `(echo $(<F) | sed -e 's/^\([0-9]\)/_\1/' -e 's/[^A-Za-z0-9_]/_/g')`"[];" >> `(echo $(<F) | tr . _)`.h
	echo "extern const u32" `(echo $(<F) | sed -e 's/^\([0-9]\)/_\1/' -e 's/[^A-Za-z0-9_]/_/g')`_size";" >> `(echo $(<F) | tr . _)`.h
endef
11 mesosphere/include/mesosphere/arch/KInterruptMaskGuard.hpp Normal file
@@ -0,0 +1,11 @@
#pragma once

#if 1 //defined MESOSPHERE_ARCH_ARM64

#include <mesosphere/arch/arm64/KInterruptMaskGuard.hpp>

#else

//#error "No arch defined"

#endif
11 mesosphere/include/mesosphere/arch/KSpinLock.hpp Normal file
@@ -0,0 +1,11 @@
#pragma once

#if 1 //defined MESOSPHERE_ARCH_ARM64

#include <mesosphere/arch/arm64/KSpinLock.hpp>

#else

//#error "No arch defined"

#endif
40 mesosphere/include/mesosphere/arch/arm64/KInterruptMaskGuard.hpp Normal file
@@ -0,0 +1,40 @@
#pragma once

#include <mesosphere/core/util.hpp>
#include <mesosphere/arch/arm64/arm64.hpp>

namespace mesosphere
{
inline namespace arch
{
inline namespace arm64
{

// Dummy. Should be platform-independent:

class KInterruptMaskGuard final {
    public:

    KInterruptMaskGuard()
    {
        flags = MESOSPHERE_READ_SYSREG(daif);
        MESOSPHERE_WRITE_SYSREG(flags | PSR_I_BIT, daif);
    }

    ~KInterruptMaskGuard()
    {
        // Restore the saved I bit: clear it first so that interrupts are
        // re-enabled if (and only if) they were enabled on entry.
        MESOSPHERE_WRITE_SYSREG((MESOSPHERE_READ_SYSREG(daif) & ~(u64)PSR_I_BIT) | (flags & PSR_I_BIT), daif);
    }

    KInterruptMaskGuard(const KInterruptMaskGuard &) = delete;
    KInterruptMaskGuard(KInterruptMaskGuard &&) = delete;
    KInterruptMaskGuard &operator=(const KInterruptMaskGuard &) = delete;
    KInterruptMaskGuard &operator=(KInterruptMaskGuard &&) = delete;

    private:
    u64 flags;
};

}
}
}
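KInterruptMaskGuard is meant to be used RAII-style: constructing it masks IRQs on the current core, and the destructor restores the saved I bit when the guard leaves scope. A minimal usage sketch, assuming a hypothetical `DoSomethingCritical` helper:

```cpp
#include <mesosphere/arch/KInterruptMaskGuard.hpp>

void DoSomethingCritical()
{
    mesosphere::KInterruptMaskGuard guard{}; // masks IRQs on this core
    // ... code that must not be interrupted ...
}   // destructor restores the previous interrupt-mask state
```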
103 mesosphere/include/mesosphere/arch/arm64/KSpinLock.hpp Normal file
@@ -0,0 +1,103 @@
#pragma once

#include <mutex>

namespace mesosphere
{
inline namespace arch
{
inline namespace arm64
{

// This largely uses the Linux kernel spinlock code, which is more efficient than Nintendo's
// (it serializes the two u16 ticket halves into one u32).
class KSpinLock final {

    private:

    struct alignas(4) Ticket {
        u16 owner, next;
    };

    Ticket ticket{}; // zero-initialized so that owner == next (unlocked)

    public:

    bool try_lock()
    {
        u32 tmp;
        Ticket lockval;

        asm volatile(
            "   prfm pstl1strm, %2\n"
            "1: ldaxr %w0, %2\n"
            "   eor %w1, %w0, %w0, ror #16\n"
            "   cbnz %w1, 2f\n"
            "   add %w0, %w0, %3\n"
            "   stxr %w1, %w0, %2\n"
            "   cbnz %w1, 1b\n"
            "2:"
            : "=&r" (lockval), "=&r" (tmp), "+Q" (ticket)
            : "I" (1 << 16)
            : "memory"
        );

        return !tmp;
    }

    void lock()
    {
        u32 tmp;
        Ticket lockval, newval;

        asm volatile(
            // Atomically increment the next ticket.
            "   prfm pstl1strm, %3\n"
            "1: ldaxr %w0, %3\n"
            "   add %w1, %w0, %w5\n"
            "   stxr %w2, %w1, %3\n"
            "   cbnz %w2, 1b\n"

            // Did we get the lock?
            "   eor %w1, %w0, %w0, ror #16\n"
            "   cbz %w1, 3f\n"
            /*
                No: spin on the owner. Send a local event to avoid missing an
                unlock before the exclusive load.
            */
            "   sevl\n"
            "2: wfe\n"
            "   ldaxrh %w2, %4\n"
            "   eor %w1, %w2, %w0, lsr #16\n"
            "   cbnz %w1, 2b\n"
            // We got the lock. Critical section starts here.
            "3:"
            : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*&ticket)
            : "Q" (ticket.owner), "I" (1 << 16)
            : "memory"
        );
    }

    void unlock()
    {
        u64 tmp;
        asm volatile(
            "   ldrh %w1, %0\n"
            "   add %w1, %w1, #1\n"
            "   stlrh %w1, %0"
            : "=Q" (ticket.owner), "=&r" (tmp)
            :
            : "memory"
        );
    }

    KSpinLock() = default;
    KSpinLock(const KSpinLock &) = delete;
    KSpinLock(KSpinLock &&) = delete;
    KSpinLock &operator=(const KSpinLock &) = delete;
    KSpinLock &operator=(KSpinLock &&) = delete;
};

}
}
}
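Since the class provides `lock`, `unlock`, and `try_lock`, it satisfies the standard Lockable requirements and composes with the RAII wrappers from `<mutex>` (which is presumably why the header pulls that in). A minimal sketch with a hypothetical shared counter:

```cpp
#include <mutex>
#include <mesosphere/arch/KSpinLock.hpp>

mesosphere::KSpinLock g_lock; // hypothetical shared lock
int g_counter = 0;            // hypothetical shared state

void Increment()
{
    std::lock_guard guard{g_lock}; // lock() on entry, unlock() on scope exit
    ++g_counter;
}
```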
67 mesosphere/include/mesosphere/arch/arm64/arm64.hpp Normal file
@@ -0,0 +1,67 @@
#pragma once

#include <boost/preprocessor.hpp>
#include <mesosphere/core/types.hpp>

#define MESOSPHERE_READ_SYSREG(r) ({\
    u64 __val; \
    asm volatile("mrs %0, " BOOST_PP_STRINGIZE(r) : "=r" (__val)); \
    __val; \
})

#define MESOSPHERE_WRITE_SYSREG(v, r) do { \
    u64 __val = (u64)v; \
    asm volatile("msr " BOOST_PP_STRINGIZE(r) ", %0" \
                 :: "r" (__val)); \
} while (false)

#define MESOSPHERE_DAIF_BIT(v) (((u64)(v)) >> 6)

namespace mesosphere
{
inline namespace arch
{
inline namespace arm64
{

enum PsrMode {
    PSR_MODE_EL0t = 0x0u,
    PSR_MODE_EL1t = 0x4u,
    PSR_MODE_EL1h = 0x5u,
    PSR_MODE_EL2t = 0x8u,
    PSR_MODE_EL2h = 0x9u,
    PSR_MODE_EL3t = 0xCu,
    PSR_MODE_EL3h = 0xDu,
    PSR_MODE_MASK = 0xFu,
    PSR_MODE32_BIT = 0x10u,
};

enum PsrInterruptBit {
    PSR_F_BIT = 1u << 6,
    PSR_I_BIT = 1u << 7,
    PSR_A_BIT = 1u << 8,
    PSR_D_BIT = 1u << 9,
};

enum PsrStatusBit {
    PSR_PAN_BIT = 1u << 22,
    PSR_UAO_BIT = 1u << 23,
};

enum PsrFlagBit {
    PSR_V_BIT = 1u << 28,
    PSR_C_BIT = 1u << 29,
    PSR_Z_BIT = 1u << 30,
    PSR_N_BIT = 1u << 31,
};

enum PsrBitGroup {
    PSR_c = 0x000000FFu,
    PSR_x = 0x0000FF00u,
    PSR_s = 0x00FF0000u,
    PSR_f = 0xFF000000u,
};

}
}
}
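The two macros expand to single `mrs`/`msr` instructions on the named system register. A minimal read sketch (`CurrentEL` is the architectural register holding the exception level in bits [3:2]):

```cpp
#include <mesosphere/arch/arm64/arm64.hpp>

mesosphere::u64 GetCurrentExceptionLevel()
{
    // CurrentEL stores the exception level in bits [3:2].
    return (MESOSPHERE_READ_SYSREG(CurrentEL) >> 2) & 3;
}
```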
11 mesosphere/include/mesosphere/board/KSystemClock.hpp Normal file
@@ -0,0 +1,11 @@
#pragma once

#if 1 //defined MESOSPHERE_ARCH_ARM64

#include <mesosphere/board/common/arm/arm64/timer/KSystemClock.hpp>

#else

//#error "No arch defined"

#endif
83 mesosphere/include/mesosphere/board/common/arm/arm64/timer/KSystemClock.hpp Normal file
@@ -0,0 +1,83 @@
#pragma once

#include <chrono>
#include <mesosphere/core/util.hpp>
#include <mesosphere/arch/arm64/arm64.hpp>

#ifndef MESOSPHERE_SYSTEM_CLOCK_RATE // NEEDS to be defined; depends on cntfreq
#define MESOSPHERE_SYSTEM_CLOCK_RATE 192000000ull
#endif

// Architectural aarch64 armv8 timer

namespace mesosphere
{
inline namespace board
{
inline namespace common
{
inline namespace arm
{
inline namespace arm64
{

// Dummy implementation
// Needs to be changed for platform stuff

using namespace std::chrono_literals;

/// Fulfills Clock named requirements
class KSystemClock {
    public:

    using rep = s64;
    using period = std::ratio<1, MESOSPHERE_SYSTEM_CLOCK_RATE>;
    using duration = std::chrono::duration<rep, period>;
    using time_point = std::chrono::time_point<KSystemClock>;

    static constexpr bool is_steady = true;

    static time_point now()
    {
        return time_point{duration::zero()};
    }

    static constexpr bool isCorePrivate = true;
    static constexpr duration forever = duration{-1};
    static constexpr time_point never = time_point{forever};

    static constexpr uint GetIrqId() { return 30; }

    static void Disable()
    {
        // Note: still continues counting.
        MESOSPHERE_WRITE_SYSREG(0, cntp_ctl_el0);
    }

    static void SetInterruptMasked(bool maskInterrupts)
    {
        u64 val = maskInterrupts ? 3 : 1; // Note: also enables the timer.
        MESOSPHERE_WRITE_SYSREG(val, cntp_ctl_el0);
    }

    static void SetAlarm(const time_point &when)
    {
        u64 val = (u64)when.time_since_epoch().count();
        MESOSPHERE_WRITE_SYSREG(val, cntp_cval_el0);
        SetInterruptMasked(false);
    }

    static void Initialize()
    {
        MESOSPHERE_WRITE_SYSREG(1, cntkctl_el1); // Trap register accesses from el0.
        Disable();
        MESOSPHERE_WRITE_SYSREG(UINT64_MAX, cntp_cval_el0);
        SetInterruptMasked(true);
    }
};

}
}
}
}
}
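Because KSystemClock models the standard Clock requirements, deadlines are plain `<chrono>` arithmetic: a duration literal converts into the clock's tick period before being programmed into `cntp_cval_el0`. A minimal sketch, assuming a hypothetical `ArmTimerIn10Ms` helper:

```cpp
#include <mesosphere/board/KSystemClock.hpp>

void ArmTimerIn10Ms()
{
    using namespace std::chrono_literals;
    auto deadline = mesosphere::KSystemClock::now() + 10ms; // converted to clock ticks
    mesosphere::KSystemClock::SetAlarm(deadline);           // unmasks and arms the timer
}
```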
36 mesosphere/include/mesosphere/core/KCoreContext.hpp Normal file
@@ -0,0 +1,36 @@
#pragma once

#include <array>
#include <mesosphere/core/util.hpp>

namespace mesosphere
{

class KProcess;
class KThread;
class KScheduler;
class KAlarm;

class KCoreContext {
    public:
    static KCoreContext &GetInstance(uint coreId) { return instances[coreId]; }
    static KCoreContext &GetCurrentInstance() { return instances[0]; /* FIXME */ }

    KThread *GetCurrentThread() const { return currentThread; }
    KProcess *GetCurrentProcess() const { return currentProcess; }
    KScheduler *GetScheduler() const { return scheduler; }
    KAlarm *GetAlarm() const { return alarm; }

    KCoreContext(KScheduler *scheduler) : scheduler(scheduler) {}

    private:
    KThread *volatile currentThread = nullptr;
    KProcess *volatile currentProcess = nullptr;
    KScheduler *volatile scheduler = nullptr;
    KAlarm *volatile alarm = nullptr;

    // more stuff

    static std::array<KCoreContext, MAX_CORES> instances;
};

}
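Each core gets one context tracking what is running on it. A minimal sketch of the intended access pattern (the free function is hypothetical):

```cpp
#include <mesosphere/core/KCoreContext.hpp>

mesosphere::KThread *CurrentThread()
{
    // The current core's context tracks the thread and process running on it.
    return mesosphere::KCoreContext::GetCurrentInstance().GetCurrentThread();
}
```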
62 mesosphere/include/mesosphere/core/types.hpp Normal file
@@ -0,0 +1,62 @@
#pragma once

#include <cstddef>
#include <cstdint>
#include <climits>
#include <boost/smart_ptr/intrusive_ptr.hpp>

#define MAX_CORES 4

namespace mesosphere
{

using ushort = unsigned short;
using uint = unsigned int;
using ulong = unsigned long;

using std::size_t;

using uiptr = std::uintptr_t;
using iptr = std::intptr_t;

using u8 = uint8_t;
using u16 = uint16_t;
using u32 = uint32_t;
using u64 = uint64_t;

using s8 = int8_t;
using s16 = int16_t;
using s32 = int32_t;
using s64 = int64_t;

using vu8 = volatile uint8_t;
using vu16 = volatile uint16_t;
using vu32 = volatile uint32_t;
using vu64 = volatile uint64_t;

using vs8 = volatile int8_t;
using vs16 = volatile int16_t;
using vs32 = volatile int32_t;
using vs64 = volatile int64_t;

using Result = uint;

template <typename T>
using SharedPtr = boost::intrusive_ptr<T>;

struct Handle {
    u16 index : 15;
    s16 id : 16;
    bool isAlias : 1;

    constexpr bool IsAliasOrFree() const { return isAlias || id < 0; }

    constexpr bool operator==(const Handle &other) const
    {
        return index == other.index && id == other.id && isAlias == other.isAlias;
    }

    constexpr bool operator!=(const Handle &other) const { return !(*this == other); }
};

}
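Handle packs a 15-bit table index, a 16-bit generation id, and an alias flag into 32 bits. A quick constexpr sketch of the layout:

```cpp
#include <mesosphere/core/types.hpp>

// A regular handle: table slot 5, generation id 1, not an alias.
constexpr mesosphere::Handle h{5, 1, false};
static_assert(!h.IsAliasOrFree());

// Pseudo-handles use id == -1 with the alias flag set
// (compare KHandleTable::selfThreadAlias later in this commit).
```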
109 mesosphere/include/mesosphere/core/util.hpp Normal file
@@ -0,0 +1,109 @@
#pragma once

#include <utility>
#include <array>
#include <boost/assert.hpp>

#include <mesosphere/core/types.hpp>

/*
    Boost doesn't provide get_parent_from_member for arrays, so we have to implement this
    manually for arrays, for gcc at least.

    Thanks fincs.
*/

#define kassert(cond) ((void)(cond))

namespace mesosphere
{

namespace
{
template <typename ClassT, typename MemberT>
union __my_offsetof {
    const MemberT ClassT::* ptr;
    iptr offset;
};

// Thanks neobrain
template<typename T, size_t N, typename... Args, size_t... Indexes>
static constexpr std::array<T, N> MakeArrayOfHelper(Args&&... args, std::index_sequence<Indexes...>) {
    // There are two parameter pack expansions here:
    // * The inner expansion is over "args"
    // * The outer expansion is over "Indexes"
    //
    // This function will always be called with sizeof...(Indexes) == N,
    // so the outer expansion generates exactly N copies of the constructor call
    return std::array<T, N> { ((void)Indexes, T { args... })... };
}

// Thanks neobrain
template<typename T, typename F, size_t N, typename... Args, size_t... Indexes>
static constexpr std::array<T, N> MakeArrayWithFactorySequenceOfHelper(Args&&... args, std::index_sequence<Indexes...>) {
    return std::array<T, N> { T { F{}(std::integral_constant<size_t, Indexes>{}), args... }... };
}
}

namespace detail
{

template <typename ClassT, typename MemberT, size_t N>
constexpr ClassT* GetParentFromArrayMember(MemberT* member, size_t index, const MemberT (ClassT::* ptr)[N]) noexcept {
    member -= index;
    return (ClassT*)((iptr)member - __my_offsetof<ClassT,MemberT[N]> { ptr }.offset);
}

template <typename ClassT, typename MemberT, size_t N>
constexpr const ClassT* GetParentFromArrayMember(const MemberT* member, size_t index, const MemberT (ClassT::* ptr)[N]) noexcept {
    member -= index;
    return (const ClassT*)((iptr)member - __my_offsetof<ClassT,MemberT[N]> { ptr }.offset);
}

template <typename ClassT, typename MemberT>
constexpr ClassT* GetParentFromMember(MemberT* member, const MemberT ClassT::* ptr) noexcept {
    return (ClassT*)((iptr)member - __my_offsetof<ClassT, MemberT> { ptr }.offset);
}

template <typename ClassT, typename MemberT>
constexpr const ClassT* GetParentFromMember(const MemberT* member, const MemberT ClassT::* ptr) noexcept {
    return (const ClassT*)((iptr)member - __my_offsetof<ClassT, MemberT> { ptr }.offset);
}

template<typename T, size_t N, typename... Args>
constexpr std::array<T, N> MakeArrayOf(Args&&... args) {
    return MakeArrayOfHelper<T, N, Args...>(std::forward<Args>(args)..., std::make_index_sequence<N>{});
}

template<typename T, typename F, size_t N, typename... Args>
constexpr std::array<T, N> MakeArrayWithFactorySequenceOf(Args&&... args) {
    return MakeArrayWithFactorySequenceOfHelper<T, F, N, Args...>(std::forward<Args>(args)..., std::make_index_sequence<N>{});
}

/// Sequence of sums of two distinct powers of 2
constexpr ulong A038444(ulong n)
{
    if (n == 0) {
        return 3;
    }

    ulong v = A038444(n - 1);
    ulong m1 = 1ul << (63 - __builtin_clzl(v));
    ulong m2 = 1ul << (63 - __builtin_clzl(v & ~m1));

    if (m2 << 1 == m1) {
        m2 = 1;
        m1 <<= 1;
    } else {
        m2 <<= 1;
    }

    return m1 | m2;
}

}
}
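The GetParentFromMember helpers implement the classic container_of idiom: recover the enclosing object from a pointer to one of its members by subtracting the member's offset. A minimal sketch with hypothetical Node/Owner types:

```cpp
#include <mesosphere/core/util.hpp>

struct Node { int hook = 0; };           // hypothetical member type
struct Owner { int x = 0; Node node; };  // hypothetical enclosing type

Owner *OwnerOfNode(Node *n)
{
    // container_of: subtract the offset of Owner::node from n's address.
    return mesosphere::detail::GetParentFromMember(n, &Owner::node);
}
```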
61 mesosphere/include/mesosphere/interfaces/IAlarmable.hpp Normal file
@@ -0,0 +1,61 @@
#pragma once

#include <boost/intrusive/set.hpp>
#include <mesosphere/board/KSystemClock.hpp>

namespace mesosphere
{

class KAlarm;

struct AlarmableSetTag;

using AlarmableSetBaseHook = boost::intrusive::set_base_hook<
    boost::intrusive::tag<AlarmableSetTag>,
    boost::intrusive::link_mode<boost::intrusive::normal_link>
>;

class IAlarmable : public AlarmableSetBaseHook {
    public:
    struct Comparator {
        constexpr bool operator()(const IAlarmable &lhs, const IAlarmable &rhs) const {
            return lhs.alarmTime < rhs.alarmTime;
        }
    };

    virtual void OnAlarm() = 0;

    constexpr KSystemClock::time_point GetAlarmTime() const { return alarmTime; }

    /// Precondition: alarm has not been set
    template<typename Clock, typename Duration>
    void SetAlarmTime(const std::chrono::time_point<Clock, Duration> &alarmTime)
    {
        SetAlarmTimeImpl(alarmTime);
    }

    template<typename Rep, typename Period>
    void SetAlarmIn(const std::chrono::duration<Rep, Period> &alarmTimeOffset)
    {
        SetAlarmTime(KSystemClock::now() + alarmTimeOffset);
    }

    void ClearAlarm();

    private:
    void SetAlarmTimeImpl(const KSystemClock::time_point &alarmTime);

    KSystemClock::time_point alarmTime = KSystemClock::time_point{};

    friend class KAlarm;
};


using AlarmableSetType =
    boost::intrusive::make_set<
        IAlarmable,
        boost::intrusive::base_hook<AlarmableSetBaseHook>,
        boost::intrusive::compare<IAlarmable::Comparator>
    >::type;

}
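A type opts into alarms by deriving from IAlarmable and implementing OnAlarm(). A minimal sketch, assuming a hypothetical `Watchdog` type:

```cpp
#include <mesosphere/interfaces/IAlarmable.hpp>

class Watchdog final : public mesosphere::IAlarmable { // hypothetical
public:
    void OnAlarm() override
    {
        // Invoked once the deadline passes (via KAlarm::HandleAlarm()).
    }
};

void Arm(Watchdog &w)
{
    using namespace std::chrono_literals;
    w.SetAlarmIn(5ms); // deadline = KSystemClock::now() + 5ms
}
```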
16 mesosphere/include/mesosphere/interfaces/IInterruptible.hpp Normal file
@@ -0,0 +1,16 @@
#pragma once

#include <mesosphere/core/util.hpp>

namespace mesosphere
{

class IWork;

class IInterruptible {
    public:

    /// Top half in Linux jargon
    virtual IWork *HandleInterrupt(uint interruptId) = 0;
};
}
15 mesosphere/include/mesosphere/interfaces/IInterruptibleWork.hpp Normal file
@@ -0,0 +1,15 @@
#pragma once

#include <mesosphere/interfaces/IWork.hpp>
#include <mesosphere/interfaces/IInterruptible.hpp>

namespace mesosphere
{

class IInterruptibleWork : public IInterruptible, public IWork {
    public:

    virtual IWork *HandleInterrupt(uint interruptId) override;
};

}
47 mesosphere/include/mesosphere/interfaces/ILimitedResource.hpp Normal file
@@ -0,0 +1,47 @@
#pragma once
// circular dep: #include "resource_limit.h"

#include <mesosphere/kresources/KAutoObject.hpp>
#include <tuple>

namespace mesosphere
{

namespace detail
{

void ReleaseResource(const SharedPtr<KProcess> &owner, KAutoObject::TypeId typeId, size_t count, size_t realCount);
void ReleaseResource(const SharedPtr<KResourceLimit> &reslimit, KAutoObject::TypeId typeId, size_t count, size_t realCount);

}

template<typename Derived>
class ILimitedResource {
    public:

    const SharedPtr<KProcess>& GetResourceOwner() const { return resourceOwner; }
    void SetResourceOwner(SharedPtr<KProcess> owner)
    {
        resourceOwner = std::move(owner);
        isLimitedResourceActive = true;
    }

    virtual std::tuple<size_t, size_t> GetResourceCount()
    {
        return {1, 1}; // current, real
    }

    ~ILimitedResource()
    {
        if (isLimitedResourceActive) {
            auto [cur, real] = GetResourceCount();
            detail::ReleaseResource(resourceOwner, Derived::typeId, cur, real);
        }
    }

    private:
    SharedPtr<KProcess> resourceOwner{};
    bool isLimitedResourceActive = false;
};

}
60 mesosphere/include/mesosphere/interfaces/ISetAllocated.hpp Normal file
@@ -0,0 +1,60 @@
#pragma once

#include <mesosphere/kresources/KObjectAllocator.hpp>

namespace mesosphere
{

template<typename Derived>
class ISetAllocated : public KObjectAllocator<Derived>::AllocatedSetHookType
{
    public:
    static void InitializeAllocator(void *buffer, size_t capacity) noexcept
    {
        allocator.GetSlabHeap().initialize(buffer, capacity);
    }

    void *operator new(size_t sz) noexcept
    {
        kassert(sz == sizeof(Derived));
        return allocator.GetSlabHeap().allocate();
    }

    void operator delete(void *ptr) noexcept
    {
        allocator.GetSlabHeap().deallocate((Derived *)ptr);
    }

    protected:
    void AddToAllocatedSet() noexcept
    {
        Derived *d = (Derived *)this;
        allocator.RegisterObject(*d);
        isRegisteredToAllocator = true;
    }

    void RemoveFromAllocatedSet() noexcept
    {
        Derived *d = (Derived *)this;
        allocator.UnregisterObject(*d);
    }

    virtual ~ISetAllocated()
    {
        if (isRegisteredToAllocator) {
            RemoveFromAllocatedSet();
            isRegisteredToAllocator = false;
        }
    }

    private:
    bool isRegisteredToAllocator = false;

    protected:
    static KObjectAllocator<Derived> allocator;
};

template<typename Derived>
KObjectAllocator<Derived> ISetAllocated<Derived>::allocator{};

}
35 mesosphere/include/mesosphere/interfaces/ISlabAllocated.hpp Normal file
@@ -0,0 +1,35 @@
#pragma once

#include <mesosphere/kresources/KSlabHeap.hpp>

namespace mesosphere
{

template<typename Derived>
class ISlabAllocated
{
    public:
    static void InitializeSlabHeap(void *buffer, size_t capacity) noexcept
    {
        slabHeap.initialize(buffer, capacity);
    }

    void *operator new(size_t sz) noexcept
    {
        kassert(sz == sizeof(Derived));
        return slabHeap.allocate();
    }

    void operator delete(void *ptr) noexcept
    {
        slabHeap.deallocate((Derived *)ptr);
    }

    protected:
    static KSlabHeap<Derived> slabHeap;
};

template<typename Derived>
KSlabHeap<Derived> ISlabAllocated<Derived>::slabHeap{};

}
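Deriving from ISlabAllocated<T> (and likewise ISetAllocated<T>) replaces `operator new`/`delete` with a fixed-capacity slab that must be given backing storage first. A minimal sketch, assuming a hypothetical `KFoo` type and a static buffer:

```cpp
#include <mesosphere/interfaces/ISlabAllocated.hpp>

class KFoo final : public mesosphere::ISlabAllocated<KFoo> { // hypothetical
public:
    int value = 0;
};

alignas(KFoo) static char g_fooBuffer[16 * sizeof(KFoo)]; // backing storage

void Example()
{
    KFoo::InitializeSlabHeap(g_fooBuffer, 16); // capacity counted in objects
    KFoo *foo = new KFoo;                      // pops a slot off the slab freelist
    delete foo;                                // pushes the slot back
}
```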
29 mesosphere/include/mesosphere/interfaces/IWork.hpp Normal file
@@ -0,0 +1,29 @@
#pragma once

#include <mesosphere/core/util.hpp>
#include <boost/intrusive/slist.hpp>

namespace mesosphere
{

struct WorkSListTag;

using WorkSListBaseHook = boost::intrusive::slist_base_hook<
    boost::intrusive::tag<WorkSListTag>,
    boost::intrusive::link_mode<boost::intrusive::normal_link>
>;

/// Bottom half in Linux jargon
class IWork : public WorkSListBaseHook {
    public:
    virtual void DoWork() = 0;
};

using WorkSList = boost::intrusive::make_slist<
    IWork,
    boost::intrusive::base_hook<WorkSListBaseHook>,
    boost::intrusive::cache_last<true>,
    boost::intrusive::constant_time_size<false>
>::type;

}
34 mesosphere/include/mesosphere/interrupts/KAlarm.hpp Normal file
@@ -0,0 +1,34 @@
#pragma once

#include <mesosphere/interfaces/IInterruptibleWork.hpp>
#include <mesosphere/interfaces/IAlarmable.hpp>
#include <mesosphere/arch/KSpinLock.hpp>
#include <mesosphere/board/KSystemClock.hpp>

namespace mesosphere
{

class KAlarm final : public IInterruptibleWork {
    public:

    //KAlarm() = default;

    /// Precondition: alarmable not already added
    void AddAlarmable(IAlarmable &alarmable);

    /// Precondition: alarmable is present
    void RemoveAlarmable(const IAlarmable &alarmable);

    void HandleAlarm();

    KAlarm(const KAlarm &) = delete;
    KAlarm(KAlarm &&) = delete;
    KAlarm &operator=(const KAlarm &) = delete;
    KAlarm &operator=(KAlarm &&) = delete;

    private:
    KSpinLock spinlock{};
    AlarmableSetType alarmables{};
};

}
27 mesosphere/include/mesosphere/interrupts/KWorkQueue.hpp Normal file
@@ -0,0 +1,27 @@
#pragma once

#include <mesosphere/interfaces/IWork.hpp>
#include <mesosphere/kresources/KAutoObject.hpp>

namespace mesosphere
{

class KWorkQueue final {
    public:

    void AddWork(IWork &work);
    void Initialize();

    void HandleWorkQueue();

    KWorkQueue(const KWorkQueue &) = delete;
    KWorkQueue(KWorkQueue &&) = delete;
    KWorkQueue &operator=(const KWorkQueue &) = delete;
    KWorkQueue &operator=(KWorkQueue &&) = delete;

    private:
    WorkSList workQueue{};
    SharedPtr<KThread> handlerThread{};
};

}
152 mesosphere/include/mesosphere/kresources/KAutoObject.hpp Normal file
@@ -0,0 +1,152 @@
#pragma once

#include <mesosphere/core/util.hpp>
#include <atomic>
#include <type_traits>

#define MESOSPHERE_AUTO_OBJECT_TRAITS(BaseId, DerivedId)\
using BaseClass = K##BaseId ;\
static constexpr KAutoObject::TypeId typeId = KAutoObject::TypeId::DerivedId;\
virtual ushort GetClassToken() const\
{\
    return KAutoObject::GenerateClassToken<K##DerivedId >();\
}\

namespace mesosphere
{

// Forward declarations for intrusive_ptr
class KProcess;
class KResourceLimit;
class KThread;

void intrusive_ptr_add_ref(KProcess *obj);
void intrusive_ptr_release(KProcess *obj);

void intrusive_ptr_add_ref(KResourceLimit *obj);
void intrusive_ptr_release(KResourceLimit *obj);

class KAutoObject {
    public:

    /// Class token for polymorphic type checking
    virtual ushort GetClassToken() const
    {
        return 0;
    }

    /// Comparison key for KObjectAllocator
    virtual u64 GetComparisonKey() const
    {
        return (u64)(uiptr)this;
    }

    /// Is alive (checked for deletion)
    virtual bool IsAlive() const = 0;

    /// Virtual destructor
    virtual ~KAutoObject();


    /// Check whether this object's dynamic type is T or derived from T
    template<typename T>
    bool IsInstanceOf() const
    {
        ushort btoken = GenerateClassToken<T>();
        ushort dtoken = GetClassToken();

        return (dtoken & btoken) == btoken;
    }

    // Explicitly disable copy and move, and add default ctor
    KAutoObject() = default;
    KAutoObject(const KAutoObject &) = delete;
    KAutoObject(KAutoObject &&) = delete;
    KAutoObject &operator=(const KAutoObject &) = delete;
    KAutoObject &operator=(KAutoObject &&) = delete;

    /// Type order as found in official kernel
    enum class TypeId : ushort {
        AutoObject = 0,
        SynchronizationObject,
        ReadableEvent,

        FinalClassesMin = 3,

        InterruptEvent = 3,
        Debug,
        ClientSession,
        Thread,
        Process,
        Session,
        ServerPort,
        ResourceLimit,
        SharedMemory,
        LightClientSession,
        ServerSession,
        LightSession,
        Event,
        LightServerSession,
        DeviceAddressSpace,
        ClientPort,
        Port,
        WritableEvent,
        TransferMemory,
        SessionRequest,
        CodeMemory, // JIT

        FinalClassesMax = CodeMemory,
    };

    private:
    std::atomic<ulong> referenceCount{0}; // official kernel has u32 for this
    friend void intrusive_ptr_add_ref(KAutoObject *obj);
    friend void intrusive_ptr_release(KAutoObject *obj);

    protected:

    template<typename T>
    static constexpr ushort GenerateClassToken()
    {
        /* The token has the following properties:
            * Multiple inheritance is not supported
            * (BaseToken & DerivedToken) == BaseToken
            * The token for KAutoObject is 0
            * Non-final classes have a token of (1 << typeid)
            * Final derived classes have a unique token part of Seq[typeid - FinalClassesMin] | 0x100,
              where Seq is (in base 2) 11, 101, 110, 1001, 1010, and so on...
        */
        if constexpr (std::is_same_v<T, KAutoObject>) {
            return 0;
        } else if constexpr (!std::is_final_v<T>) {
            return (1 << (ushort)T::typeId) | GenerateClassToken<typename T::BaseClass>();
        } else {
            ushort off = (ushort)T::typeId - (ushort)TypeId::FinalClassesMin;
            return ((ushort)detail::A038444(off) << 9) | 0x100u | GenerateClassToken<typename T::BaseClass>();
        }
    }
};

inline void intrusive_ptr_add_ref(KAutoObject *obj)
{
    ulong oldval = obj->referenceCount.fetch_add(1);
    kassert(oldval + 1 != 0);
}

inline void intrusive_ptr_release(KAutoObject *obj)
{
    ulong oldval = obj->referenceCount.fetch_sub(1);
    if (oldval - 1 == 0) {
        delete obj;
    }
}

template <typename T>
inline SharedPtr<T> DynamicObjectCast(SharedPtr<KAutoObject> object) {
    if (object != nullptr && object->IsInstanceOf<T>()) {
        return boost::static_pointer_cast<T>(object);
    }
    return nullptr;
}

}
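DynamicObjectCast is the checked downcast built on the token scheme: it yields nullptr unless the object's class token contains the target type's token. A minimal sketch using KResourceLimit (added later in this commit) as the target type:

```cpp
#include <mesosphere/kresources/KAutoObject.hpp>
#include <mesosphere/kresources/KResourceLimit.hpp>

mesosphere::SharedPtr<mesosphere::KResourceLimit>
AsResourceLimit(mesosphere::SharedPtr<mesosphere::KAutoObject> obj)
{
    // nullptr unless obj really is a KResourceLimit.
    return mesosphere::DynamicObjectCast<mesosphere::KResourceLimit>(std::move(obj));
}
```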
70 mesosphere/include/mesosphere/kresources/KObjectAllocator.hpp Normal file
@@ -0,0 +1,70 @@
#pragma once
#include <mutex>
#include <boost/intrusive/set.hpp>
#include <mesosphere/core/util.hpp>
#include <mesosphere/kresources/KSlabHeap.hpp>
#include <mesosphere/threading/KMutex.hpp>

namespace mesosphere
{

template<typename T>
class KObjectAllocator {
    private:
    struct Comparator {
        constexpr bool operator()(const T &lhs, const T &rhs) const
        {
            return lhs.GetComparisonKey() < rhs.GetComparisonKey();
        }
    };

    public:
    struct HookTag;

    using AllocatedSetHookType = boost::intrusive::set_base_hook<
        boost::intrusive::tag<HookTag>,
        boost::intrusive::link_mode<boost::intrusive::normal_link>
    >;
    using AllocatedSetType = typename
        boost::intrusive::make_set<
            T,
            boost::intrusive::base_hook<AllocatedSetHookType>,
            boost::intrusive::compare<Comparator>
        >::type;

    using pointer = T *;
    using const_pointer = const T *;
    using void_pointer = void *;
    using const_void_ptr = const void *;
    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;

    AllocatedSetType &GetAllocatedSet()
    {
        return allocatedSet;
    }

    KSlabHeap<T> &GetSlabHeap()
    {
        return slabHeap;
    }

    void RegisterObject(T &obj) noexcept
    {
        std::lock_guard guard{mutex};
        allocatedSet.insert(obj);
    }

    void UnregisterObject(T &obj) noexcept
    {
        std::lock_guard guard{mutex};
        allocatedSet.erase(obj);
    }

    private:
    AllocatedSetType allocatedSet{};
    KSlabHeap<T> slabHeap{};
    KMutex mutex{};
};

}
87 mesosphere/include/mesosphere/kresources/KResourceLimit.hpp Normal file
@@ -0,0 +1,87 @@
#pragma once

#include <mesosphere/kresources/KAutoObject.hpp>
#include <mesosphere/interfaces/ISetAllocated.hpp>
#include <mesosphere/threading/KConditionVariable.hpp>

namespace mesosphere
{

class KThread;
class KEvent;
class KTransferMemory;
class KSession;

class KResourceLimit final :
    public KAutoObject,
    public ISetAllocated<KResourceLimit>
{
    public:

    MESOSPHERE_AUTO_OBJECT_TRAITS(AutoObject, ResourceLimit);
    virtual bool IsAlive() const override { return true; }

    enum class Category : uint {
        Memory = 0,
        Threads,
        Events,
        TransferMemories,
        Sessions,

        Max,
    };

    static constexpr Category GetCategory(KAutoObject::TypeId typeId) {
        switch (typeId) {
            case KAutoObject::TypeId::Thread: return Category::Threads;
            case KAutoObject::TypeId::Event: return Category::Events;
            case KAutoObject::TypeId::TransferMemory: return Category::TransferMemories;
            case KAutoObject::TypeId::Session: return Category::Sessions;
            default: return Category::Max;
        }
    }

    template<typename T> Category GetCategoryOf()
    {
        return GetCategory(T::typeId);
    }

    static KResourceLimit &GetDefaultInstance() { return defaultInstance; }

    size_t GetCurrentValue(Category category) const;
    size_t GetLimitValue(Category category) const;
    size_t GetRemainingValue(Category category) const;

    bool SetLimitValue(Category category, size_t value);

    template<typename Rep, typename Period>
    bool Reserve(Category category, size_t count, const std::chrono::duration<Rep, Period>& timeout)
    {
        return ReserveDetail(category, count, KSystemClock::now() + timeout);
    }

    void Release(Category category, size_t count, size_t realCount);

    private:

    static KResourceLimit defaultInstance;
    bool ReserveDetail(Category category, size_t count, const KSystemClock::time_point &timeoutTime);

    // Signed in official kernel
    size_t limitValues[(size_t)Category::Max] = {};

    // Current value: real value + dangling resources about to be released
    size_t currentValues[(size_t)Category::Max] = {};
    size_t realValues[(size_t)Category::Max] = {};

    mutable KConditionVariable condvar{};
};

inline void intrusive_ptr_add_ref(KResourceLimit *obj)
{
    intrusive_ptr_add_ref((KAutoObject *)obj);
}

inline void intrusive_ptr_release(KResourceLimit *obj)
{
    intrusive_ptr_release((KAutoObject *)obj);
}
}
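A minimal sketch of the reserve/release flow against the default instance, assuming a hypothetical caller:

```cpp
#include <mesosphere/kresources/KResourceLimit.hpp>

bool TryCreateThreadSlot()
{
    using namespace std::chrono_literals;
    auto &reslimit = mesosphere::KResourceLimit::GetDefaultInstance();

    // Block up to 100ms waiting for a free slot in the Threads category.
    if (!reslimit.Reserve(mesosphere::KResourceLimit::Category::Threads, 1, 100ms)) {
        return false; // limit still exhausted after the timeout
    }
    // ... create the thread; if that fails, give the slot back:
    reslimit.Release(mesosphere::KResourceLimit::Category::Threads, 1, 1);
    return true;
}
```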
55 mesosphere/include/mesosphere/kresources/KSlabHeap.hpp Normal file
@@ -0,0 +1,55 @@
#pragma once

#include <mesosphere/kresources/KSlabStack.hpp>

namespace mesosphere
{

template<typename T>
class KSlabHeap {
    public:
    using pointer = T *;
    using const_pointer = const T *;
    using void_pointer = void *;
    using const_void_ptr = const void *;
    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;

    private:
    KSlabStack<T> stack{};
    size_t capacity = 0;
    T *bufferStart = nullptr;

    public:
    T *allocate() noexcept
    {
        return stack.pop();
    }

    void deallocate(T *elem) noexcept
    {
        kassert(elem >= bufferStart && elem < bufferStart + capacity);
        stack.push(elem);
    }

    constexpr size_t size() const
    {
        return capacity;
    }

    KSlabHeap() noexcept = default;

    void initialize(void *buffer, size_t capacity)
    {
        this->capacity = capacity;
        this->bufferStart = (T *)buffer;
        stack.initialize(buffer, capacity);
    }

    KSlabHeap(void *buffer, size_t capacity) noexcept : stack(buffer, capacity), capacity(capacity), bufferStart((T *)buffer)
    {
    }
};

}
78 mesosphere/include/mesosphere/kresources/KSlabStack.hpp Normal file
@@ -0,0 +1,78 @@
#pragma once

#include <mesosphere/core/util.hpp>
#include <atomic>

namespace mesosphere
{

template<typename T>
class KSlabStack {
    public:
    using pointer = T *;
    using const_pointer = const T *;
    using void_pointer = void *;
    using const_void_ptr = const void *;
    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;

    private:
    struct Node {
        Node *next;
    };

    std::atomic<Node *> head{nullptr};
    public:

    void push(T *data) noexcept
    {
        Node *newHead = (Node *)data;
        Node *oldHead = head.load();
        do {
            newHead->next = oldHead;
        } while (!head.compare_exchange_weak(oldHead, newHead));
    }

    T *pop() noexcept
    {
        Node *newHead;
        Node *oldHead = head.load();
        if (oldHead == nullptr) {
            return nullptr;
        } else {
            do {
                newHead = oldHead == nullptr ? oldHead : oldHead->next;
            } while (!head.compare_exchange_weak(oldHead, newHead));

            return (T *)oldHead;
        }
    }

    KSlabStack() noexcept = default;

    // Not reentrant (unlike NN's init function)
    void initialize(void *buffer, size_t size) noexcept
    {
        T *ar = (T *)buffer;
        if (size == 0) {
            return;
        }

        Node *ndlast = (Node *)&ar[size - 1];
        ndlast->next = nullptr;

        for (size_t i = 0; i < size - 1; i++) {
            Node *nd = (Node *)&ar[i];
            Node *ndnext = (Node *)&ar[i + 1];
            nd->next = ndnext;
        }

        Node *ndfirst = (Node *)&ar[0];
        head.store(ndfirst);
    }

    KSlabStack(void *buffer, size_t size) { initialize(buffer, size); }
};

}
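initialize() threads a freelist through the caller-supplied array itself, so elements must be at least pointer-sized. A minimal sketch with a hypothetical `Slot` type:

```cpp
#include <mesosphere/kresources/KSlabStack.hpp>

struct Slot { char payload[32]; }; // hypothetical element type (>= sizeof(void *))

static Slot g_slots[8];            // backing array

void Example()
{
    mesosphere::KSlabStack<Slot> stack;
    stack.initialize(g_slots, 8);  // links a freelist through g_slots

    Slot *a = stack.pop();         // first pop yields &g_slots[0]
    stack.push(a);                 // returns the slot to the freelist
}
```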
74
mesosphere/include/mesosphere/processes/KHandleTable.hpp
Normal file
74
mesosphere/include/mesosphere/processes/KHandleTable.hpp
Normal file
|
@ -0,0 +1,74 @@
|
|||
#pragma once

#include <mesosphere/core/util.hpp>
#include <mesosphere/kresources/KAutoObject.hpp>
#include <mesosphere/arch/KSpinLock.hpp>
#include <array>

namespace mesosphere
{

class KThread;
class KProcess;

class KHandleTable final {
    public:

    static constexpr size_t capacityLimit = 1024;
    static constexpr Handle selfThreadAlias{0, -1, true};
    static constexpr Handle selfProcessAlias{1, -1, true};

    template<typename T>
    SharedPtr<T> Get(Handle handle, bool allowAlias = true) const
    {
        if constexpr (std::is_same_v<T, KAutoObject>) {
            (void)allowAlias;
            return GetAutoObject(handle);
        } else if constexpr (std::is_same_v<T, KThread>) {
            return GetThread(handle, allowAlias);
        } else if constexpr (std::is_same_v<T, KProcess>) {
            return GetProcess(handle, allowAlias);
        } else {
            return DynamicObjectCast<T>(GetAutoObject(handle));
        }
    }

    bool Generate(Handle &out, SharedPtr<KAutoObject> obj);

    /// For deferred-init
    bool Set(SharedPtr<KAutoObject> obj, Handle handle);

    bool Close(Handle handle);
    void Destroy();

    constexpr size_t GetNumActive() const { return numActive; }
    constexpr size_t GetSize() const { return size; }
    constexpr size_t GetCapacity() const { return capacity; }

    KHandleTable(size_t capacity);
    ~KHandleTable();

    private:

    bool IsValid(Handle handle) const;
    SharedPtr<KAutoObject> GetAutoObject(Handle handle) const;
    SharedPtr<KThread> GetThread(Handle handle, bool allowAlias = true) const;
    SharedPtr<KProcess> GetProcess(Handle handle, bool allowAlias = true) const;

    struct Entry {
        SharedPtr<KAutoObject> object{};
        s16 id = 0;
    };

    std::array<Entry, capacityLimit> entries{};

    // The official kernel uses a pointer here; Yuzu and we instead repurpose a field in Entry.
    s16 firstFreeIndex = 0;
    s16 idCounter = 1;

    u16 numActive = 0, size = 0, capacity = 0;

    mutable KSpinLock spinlock;
};

}
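For context: Handle as used here packs a slot index, a generation id, and an alias flag. The exact type isn't part of this hunk, so the layout below is an assumption made for illustration (the selfThreadAlias/selfProcessAlias initializers only make sense with a signed id field):

#include <cassert>
#include <cstdint>

// Hypothetical layout, for illustration only: the real Handle type is
// defined elsewhere in this commit.
struct Handle {
    std::uint16_t index;   // slot in the table's entries array
    std::int16_t  id;      // generation counter; <= 0 marks free/alias entries
    bool          isAlias; // pseudo-handles such as "current thread/process"

    constexpr bool IsAliasOrFree() const { return isAlias || id <= 0; }
    constexpr bool operator==(const Handle &o) const {
        return index == o.index && id == o.id && isAlias == o.isAlias;
    }
};

int main() {
    constexpr Handle selfThreadAlias{0, -1, true};
    constexpr Handle normal{42, 7, false};
    static_assert(selfThreadAlias.IsAliasOrFree());
    static_assert(!normal.IsAliasOrFree());
    assert(!(normal == selfThreadAlias));
}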
43
mesosphere/include/mesosphere/processes/KProcess.hpp
Normal file

@@ -0,0 +1,43 @@
#pragma once

#include <mesosphere/core/util.hpp>
#include <mesosphere/kresources/KAutoObject.hpp>
#include <mesosphere/interfaces/ISetAllocated.hpp>

namespace mesosphere
{

// Forward declarations, inside the namespace so they name mesosphere::KThread etc.
class KThread;
class KResourceLimit;

class KProcess : public KAutoObject {
    public:
    MESOSPHERE_AUTO_OBJECT_TRAITS(AutoObject, Process);

    virtual bool IsAlive() const override { return true; }
    constexpr long GetSchedulerOperationCount() const { return schedulerOperationCount; }

    void IncrementSchedulerOperationCount() { ++schedulerOperationCount; }
    void SetLastThreadAndIdleSelectionCount(KThread *thread, ulong idleSelectionCount);

    const SharedPtr<KResourceLimit> &GetResourceLimit() const { return reslimit; }

    private:
    KThread *lastThreads[MAX_CORES]{nullptr};
    ulong lastIdleSelectionCount[MAX_CORES]{0};
    long schedulerOperationCount = -1;

    SharedPtr<KResourceLimit> reslimit{};
};

inline void intrusive_ptr_add_ref(KProcess *obj)
{
    intrusive_ptr_add_ref((KAutoObject *)obj);
}

inline void intrusive_ptr_release(KProcess *obj)
{
    intrusive_ptr_release((KAutoObject *)obj);
}

}
78
mesosphere/include/mesosphere/threading/KConditionVariable.hpp
Normal file

@@ -0,0 +1,78 @@
#pragma once

#include <mesosphere/threading/KThread.hpp>
#include <mesosphere/threading/KMutex.hpp>

namespace mesosphere
{

/// Provides an interface similar to std::condition_variable
class KConditionVariable final {
    public:

    using native_handle_type = uiptr;

    KConditionVariable() = default;
    KConditionVariable(const KConditionVariable &) = delete;
    KConditionVariable(KConditionVariable &&) = delete;
    KConditionVariable &operator=(const KConditionVariable &) = delete;
    KConditionVariable &operator=(KConditionVariable &&) = delete;

    native_handle_type native_handle() { return mutex_.native_handle(); }

    KMutex &mutex() { return mutex_; }

    void wait() noexcept
    {
        wait_until_impl(KSystemClock::never);
    }
    template<typename Predicate>
    void wait(Predicate pred)
    {
        while (!pred()) {
            wait();
        }
    }

    template<typename Clock, typename Duration>
    void wait_until(const std::chrono::time_point<Clock, Duration> &timeoutPoint) noexcept
    {
        wait_until_impl(timeoutPoint);
    }
    template<typename Clock, typename Duration, typename Predicate>
    bool wait_until(const std::chrono::time_point<Clock, Duration> &timeoutPoint, Predicate pred)
    {
        while (!pred()) {
            wait_until(timeoutPoint);
            if (Clock::now() >= timeoutPoint) {
                return pred();
            }
        }

        return true;
    }

    template<typename Rep, typename Period>
    void wait_for(const std::chrono::duration<Rep, Period> &timeout) noexcept
    {
        wait_until(KSystemClock::now() + timeout);
    }

    template<typename Rep, typename Period, typename Predicate>
    bool wait_for(const std::chrono::duration<Rep, Period> &timeout, Predicate pred)
    {
        return wait_until(KSystemClock::now() + timeout, std::move(pred));
    }

    void notify_one() noexcept;
    void notify_all() noexcept;

    private:
    void wait_until_impl(const KSystemClock::time_point &timeoutPoint) noexcept;

    KMutex mutex_{};
    KThread::WaitList waiterList{};
};

}
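The predicate overloads mirror the std::condition_variable contract: always re-check the predicate after waking, so spurious wakeups are harmless. The same idiom in plain user-space C++, for comparison:

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

// The wait-with-predicate contract KConditionVariable mirrors:
// loop until pred() holds, re-evaluating after every wakeup.
int main() {
    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    std::thread producer([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        { std::lock_guard lk{m}; ready = true; }
        cv.notify_one();
    });

    std::unique_lock lk{m};
    cv.wait(lk, [&] { return ready; }); // equivalent to KConditionVariable::wait(pred)

    producer.join();
    return ready ? 0 : 1;
}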
340
mesosphere/include/mesosphere/threading/KMultiLevelQueue.hpp
Normal file

@@ -0,0 +1,340 @@
#pragma once

#include <iterator>
#include <boost/intrusive/list.hpp>
#include <mesosphere/core/util.hpp>

namespace mesosphere
{

template<uint depth_, typename IntrusiveListType_, typename PrioGetterType_>
class KMultiLevelQueue {
    static_assert(depth_ <= 64, "Priority bitfield must fit in a u64");
    public:
    static constexpr uint depth = depth_;

    using IntrusiveListType = IntrusiveListType_;
    using PrioGetterType = PrioGetterType_;

    using value_traits = typename IntrusiveListType::value_traits;

    using pointer = typename IntrusiveListType::pointer;
    using const_pointer = typename IntrusiveListType::const_pointer;
    using value_type = typename IntrusiveListType::value_type;
    using reference = typename IntrusiveListType::reference;
    using const_reference = typename IntrusiveListType::const_reference;
    using difference_type = typename IntrusiveListType::difference_type;
    using size_type = typename IntrusiveListType::size_type;

    template<bool isConst>
    class iterator_impl {
        public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename KMultiLevelQueue::value_type;
        using difference_type = typename KMultiLevelQueue::difference_type;
        using pointer = typename std::conditional<
            isConst,
            typename KMultiLevelQueue::const_pointer,
            typename KMultiLevelQueue::pointer>::type;
        using reference = typename std::conditional<
            isConst,
            typename KMultiLevelQueue::const_reference,
            typename KMultiLevelQueue::reference>::type;

        bool operator==(const iterator_impl &other) const {
            return (isEnd() && other.isEnd()) || (it == other.it);
        }

        bool operator!=(const iterator_impl &other) const {
            return !(*this == other);
        }

        reference operator*() {
            return *it;
        }

        pointer operator->() {
            return it.operator->();
        }

        iterator_impl &operator++() {
            if (isEnd()) {
                return *this;
            } else {
                ++it;
            }
            if (it == getEndItForPrio()) {
                u64 prios = mlq.usedPriorities;
                prios &= ~((1ull << (currentPrio + 1)) - 1);
                if (prios == 0) {
                    currentPrio = KMultiLevelQueue::depth;
                } else {
                    currentPrio = __builtin_ffsll(prios) - 1;
                    it = getBeginItForPrio();
                }
            }
            return *this;
        }

        iterator_impl &operator--() {
            if (isEnd()) {
                if (mlq.usedPriorities != 0) {
                    currentPrio = 63 - __builtin_clzll(mlq.usedPriorities);
                    it = getEndItForPrio();
                    --it;
                }
            } else if (it == getBeginItForPrio()) {
                u64 prios = mlq.usedPriorities;
                prios &= (1ull << currentPrio) - 1;
                if (prios != 0) {
                    currentPrio = __builtin_ffsll(prios) - 1;
                    it = getEndItForPrio();
                    --it;
                }
            } else {
                --it;
            }
            return *this;
        }

        // Postfix operators must return the pre-increment value by value,
        // not a reference to a local.
        iterator_impl operator++(int) {
            const iterator_impl v{*this};
            ++(*this);
            return v;
        }

        iterator_impl operator--(int) {
            const iterator_impl v{*this};
            --(*this);
            return v;
        }

        // allow implicit non-const -> const conversion
        iterator_impl(const iterator_impl<false> &other)
            : mlq(other.mlq), it(other.it), currentPrio(other.currentPrio) {}

        friend class iterator_impl<true>;
        iterator_impl() = default;
        private:
        friend class KMultiLevelQueue;
        using container_ref = typename std::conditional<
            isConst,
            const KMultiLevelQueue &,
            KMultiLevelQueue &>::type;
        using list_iterator = typename std::conditional<
            isConst,
            typename IntrusiveListType::const_iterator,
            typename IntrusiveListType::iterator>::type;
        container_ref mlq;
        list_iterator it;
        uint currentPrio;

        explicit iterator_impl(container_ref mlq, list_iterator const &it, uint currentPrio)
            : mlq(mlq), it(it), currentPrio(currentPrio) {}
        explicit iterator_impl(container_ref mlq, uint currentPrio)
            : mlq(mlq), it(), currentPrio(currentPrio) {}
        constexpr bool isEnd() const {
            return currentPrio == KMultiLevelQueue::depth;
        }

        list_iterator getBeginItForPrio() const {
            if constexpr (isConst) {
                return mlq.levels[currentPrio].cbegin();
            } else {
                return mlq.levels[currentPrio].begin();
            }
        }

        list_iterator getEndItForPrio() const {
            if constexpr (isConst) {
                return mlq.levels[currentPrio].cend();
            } else {
                return mlq.levels[currentPrio].end();
            }
        }
    };

    using iterator = iterator_impl<false>;
    using const_iterator = iterator_impl<true>;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;

    void add(reference r) {
        uint prio = prioGetter(r);
        levels[prio].push_back(r);
        usedPriorities |= 1ull << prio;
    }

    void remove(const_reference r) {
        uint prio = prioGetter(r);
        levels[prio].erase(levels[prio].iterator_to(r));
        if (levels[prio].empty()) {
            usedPriorities &= ~(1ull << prio);
        }
    }

    void remove(const_iterator it) {
        remove(*it);
    }
    void erase(const_iterator it) {
        remove(it);
    }

    void adjust(const_reference r, uint oldPrio, bool isCurrentThread = false) {
        uint prio = prioGetter(r);

        // The thread is the current thread if and only if it is first on the running queue of
        // highest priority, so it needs to be first on the destination queue as well.
        auto newnext = isCurrentThread ? levels[prio].cbegin() : levels[prio].cend();
        levels[prio].splice(newnext, levels[oldPrio], levels[oldPrio].iterator_to(r));

        usedPriorities |= 1ull << prio;
    }
    void adjust(const_iterator it, uint oldPrio, bool isCurrentThread = false) {
        adjust(*it, oldPrio, isCurrentThread);
    }

    void transferToFront(const_reference r, KMultiLevelQueue &other) {
        uint prio = prioGetter(r);
        other.levels[prio].splice(other.levels[prio].begin(), levels[prio], levels[prio].iterator_to(r));
        other.usedPriorities |= 1ull << prio;
        if (levels[prio].empty()) {
            usedPriorities &= ~(1ull << prio);
        }
    }

    void transferToFront(const_iterator it, KMultiLevelQueue &other) {
        transferToFront(*it, other);
    }

    void transferToBack(const_reference r, KMultiLevelQueue &other) {
        uint prio = prioGetter(r);
        other.levels[prio].splice(other.levels[prio].end(), levels[prio], levels[prio].iterator_to(r));
        other.usedPriorities |= 1ull << prio;
        if (levels[prio].empty()) {
            usedPriorities &= ~(1ull << prio);
        }
    }

    void transferToBack(const_iterator it, KMultiLevelQueue &other) {
        transferToBack(*it, other);
    }

    void yield(uint prio, size_type n = 1) {
        levels[prio].shift_forward(n);
    }
    void yield(const_reference r) {
        uint prio = prioGetter(r);
        if (&r == &levels[prio].front()) {
            yield(prio, 1);
        }
    }

    uint highestPrioritySet(uint maxPrio = 0) const {
        u64 priorities = maxPrio == 0 ? usedPriorities : (usedPriorities & ~((1ull << maxPrio) - 1));
        return priorities == 0 ? depth : (uint)(__builtin_ffsll((long long)priorities) - 1);
    }

    uint lowestPrioritySet(uint minPrio = depth - 1) const {
        u64 priorities = minPrio >= depth - 1 ? usedPriorities : (usedPriorities & ((1ull << (minPrio + 1)) - 1));
        return priorities == 0 ? depth : 63 - __builtin_clzll(priorities);
    }

    size_type size(uint prio) const {
        return levels[prio].size();
    }
    bool empty(uint prio) const {
        return (usedPriorities & (1ull << prio)) == 0;
    }

    size_type size() const {
        u64 prios = usedPriorities;
        size_type sz = 0;
        while (prios != 0) {
            int ffs = __builtin_ffsll(prios);
            sz += size((uint)ffs - 1);
            prios &= ~(1ull << (ffs - 1));
        }

        return sz;
    }
    bool empty() const {
        return usedPriorities == 0;
    }

    reference front(uint maxPrio = 0) {
        // Undefined behavior if empty
        uint priority = highestPrioritySet(maxPrio);
        return levels[priority == depth ? 0 : priority].front();
    }
    const_reference front(uint maxPrio = 0) const {
        // Undefined behavior if empty
        uint priority = highestPrioritySet(maxPrio);
        return levels[priority == depth ? 0 : priority].front();
    }

    reference back(uint minPrio = depth - 1) {
        // Inclusive
        // Undefined behavior if empty
        uint priority = highestPrioritySet(minPrio); // intended
        return levels[priority == depth ? 63 : priority].back();
    }
    const_reference back(uint minPrio = KMultiLevelQueue::depth - 1) const {
        // Inclusive
        // Undefined behavior if empty
        uint priority = highestPrioritySet(minPrio); // intended
        return levels[priority == depth ? 63 : priority].back();
    }

    const_iterator cbegin(uint maxPrio = 0) const {
        uint priority = highestPrioritySet(maxPrio);
        return priority == depth ? cend() : const_iterator{*this, levels[priority].cbegin(), priority};
    }
    const_iterator begin(uint maxPrio = 0) const {
        return cbegin(maxPrio);
    }
    iterator begin(uint maxPrio = 0) {
        uint priority = highestPrioritySet(maxPrio);
        return priority == depth ? end() : iterator{*this, levels[priority].begin(), priority};
    }

    const_iterator cend(uint minPrio = depth - 1) const {
        return minPrio == depth - 1 ? const_iterator{*this, depth} : cbegin(minPrio + 1);
    }
    const_iterator end(uint minPrio = depth - 1) const {
        return cend(minPrio);
    }
    iterator end(uint minPrio = depth - 1) {
        return minPrio == depth - 1 ? iterator{*this, depth} : begin(minPrio + 1);
    }

    const_reverse_iterator crbegin(uint maxPrio = 0) const {
        return const_reverse_iterator(cbegin(maxPrio));
    }
    const_reverse_iterator rbegin(uint maxPrio = 0) const {
        return crbegin(maxPrio);
    }
    reverse_iterator rbegin(uint maxPrio = 0) {
        return reverse_iterator(begin(maxPrio));
    }

    const_reverse_iterator crend(uint minPrio = KMultiLevelQueue::depth - 1) const {
        return const_reverse_iterator(cend(minPrio));
    }
    const_reverse_iterator rend(uint minPrio = KMultiLevelQueue::depth - 1) const {
        return crend(minPrio);
    }
    reverse_iterator rend(uint minPrio = KMultiLevelQueue::depth - 1) {
        return reverse_iterator(end(minPrio));
    }

    KMultiLevelQueue(PrioGetterType prioGetter) : prioGetter(prioGetter), usedPriorities(0), levels() {}
    explicit KMultiLevelQueue(const value_traits &traits, PrioGetterType prioGetter = PrioGetterType{})
        : prioGetter(prioGetter), usedPriorities(0), levels(detail::MakeArrayOf<IntrusiveListType, depth>(traits)) {}

    private:
    PrioGetterType prioGetter;
    u64 usedPriorities;
    std::array<IntrusiveListType, depth> levels;
};

}
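What makes the multi-level queue fast is usedPriorities: one u64 bit per non-empty level, so the highest or lowest populated priority is a single find-first-set or count-leading-zeros. The two expressions above, extracted and sanity-checked in isolation:

#include <cassert>
#include <cstdint>

using u64 = std::uint64_t;

// Highest priority == lowest set bit index (priority 0 is most urgent).
unsigned highestPrioritySet(u64 used, unsigned depth, unsigned maxPrio = 0) {
    u64 prios = maxPrio == 0 ? used : (used & ~((1ull << maxPrio) - 1));
    return prios == 0 ? depth : (unsigned)(__builtin_ffsll((long long)prios) - 1);
}

// Lowest priority == highest set bit index.
unsigned lowestPrioritySet(u64 used, unsigned depth, unsigned minPrio) {
    u64 prios = minPrio >= depth - 1 ? used : (used & ((1ull << (minPrio + 1)) - 1));
    return prios == 0 ? depth : 63 - __builtin_clzll(prios);
}

int main() {
    constexpr unsigned depth = 64;
    u64 used = (1ull << 3) | (1ull << 17) | (1ull << 59);
    assert(highestPrioritySet(used, depth) == 3);
    assert(highestPrioritySet(used, depth, 4) == 17);  // skip prios < 4
    assert(lowestPrioritySet(used, depth, depth - 1) == 59);
    assert(lowestPrioritySet(used, depth, 16) == 3);   // only prios <= 16
    assert(highestPrioritySet(0, depth) == depth);     // empty -> sentinel
}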
93
mesosphere/include/mesosphere/threading/KMutex.hpp
Normal file

@@ -0,0 +1,93 @@
#pragma once

#include <chrono>
#include <atomic>
#include <mutex>

#include <mesosphere/core/util.hpp>
#include <mesosphere/core/KCoreContext.hpp>

namespace mesosphere
{

/// Fulfills Mutex requirements
class KMutex final {
    public:

    using native_handle_type = uiptr;

    KMutex() = default;
    KMutex(const KMutex &) = delete;
    KMutex(KMutex &&) = delete;
    KMutex &operator=(const KMutex &) = delete;
    KMutex &operator=(KMutex &&) = delete;

    native_handle_type native_handle() { return tag; }

    bool try_lock()
    {
        KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread();
        return try_lock_impl_get_owner(currentThread) == nullptr;
    }

    void lock()
    {
        KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread();
        KThread *owner;

        while ((owner = try_lock_impl_get_owner(currentThread)) != nullptr) {
            // Our thread may be resumed even if we weren't given the mutex
            lock_slow_path(*owner, *currentThread);
        }
    }

    void unlock()
    {
        // Ensure sequential ordering, so that this happens-after the mutex load
        std::atomic_thread_fence(std::memory_order_seq_cst);

        KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread();
        native_handle_type thisThread = (native_handle_type)currentThread;

        /*
            If we don't have any waiter, just store 0 (free the mutex).
            Otherwise, or if a race condition happens and a new waiter appears,
            take the slow path.
        */
        if (tag.load() != thisThread || !tag.compare_exchange_strong(thisThread, 0)) {
            unlock_slow_path(*currentThread);
        }
    }

    private:

    KThread *try_lock_impl_get_owner(KThread *currentThread)
    {
        native_handle_type oldTag, newTag;
        native_handle_type thisThread = (native_handle_type)currentThread;

        oldTag = tag.load();
        do {
            // Set the "has waiters" bit if the mutex was not free
            newTag = oldTag == 0 ? thisThread : (oldTag | 1);
        } while (!tag.compare_exchange_weak(oldTag, newTag, std::memory_order_seq_cst));

        // The mutex was held by another thread => return that owner
        if (oldTag != 0 && (oldTag & ~1) != thisThread) {
            return (KThread *)(oldTag & ~1);
        } else {
            /*
                Ensure sequential ordering if the mutex was acquired,
                so that the mutex lock happens-before the mutex unlock.
            */
            std::atomic_thread_fence(std::memory_order_seq_cst);
            return nullptr;
        }
    }

    void lock_slow_path(KThread &owner, KThread &requester);
    void unlock_slow_path(KThread &owner);
    std::atomic<native_handle_type> tag{};
};

}
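The KMutex tag is the owning thread's pointer with bit 0 repurposed as a "has waiters" flag (thread objects are sufficiently aligned that the bit is always free). A self-contained sketch of just that tag arithmetic and the fast paths it enables; no real blocking is modeled here and all names are illustrative:

#include <atomic>
#include <cassert>
#include <cstdint>

// Owner pointer with a low "has waiters" bit, as in KMutex::tag above.
struct alignas(8) FakeThread { int id; };

using Tag = std::uintptr_t;

Tag pack(FakeThread *owner, bool waiters) {
    return reinterpret_cast<Tag>(owner) | (waiters ? 1u : 0u);
}
FakeThread *ownerOf(Tag t) { return reinterpret_cast<FakeThread *>(t & ~Tag{1}); }
bool hasWaiters(Tag t)     { return (t & 1) != 0; }

int main() {
    FakeThread a{1}, b{2};
    std::atomic<Tag> tag{0};

    // a acquires the free mutex: 0 -> a, no waiters bit.
    Tag expected = 0;
    assert(tag.compare_exchange_strong(expected, pack(&a, false)));

    // b tries to acquire: mutex not free, so it sets the waiters bit instead.
    Tag old = tag.load();
    assert(ownerOf(old) == &a);
    tag.store(old | 1);
    assert(hasWaiters(tag.load()) && ownerOf(tag.load()) == &a);

    // The fast unlock path (CAS owner -> 0) must fail once the bit is set,
    // forcing the slow path that hands the mutex to a waiter.
    Tag bare = pack(&a, false);
    assert(!tag.compare_exchange_strong(bare, 0));
    (void)b;
}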
130
mesosphere/include/mesosphere/threading/KScheduler.hpp
Normal file

@@ -0,0 +1,130 @@
#pragma once

#include <array>
#include <mesosphere/core/util.hpp>
#include <mesosphere/threading/KMultiLevelQueue.hpp>
#include <mesosphere/threading/KThread.hpp>

namespace mesosphere
{

//TODO
struct KCriticalSection { void lock() {} void unlock() {} bool try_lock() { return true; } };

class KScheduler {
    public:
    class Global {
        public:
        using MlqType = KMultiLevelQueue<64, KThread::SchedulerList, __decltype(&KThread::GetPriorityOf)>;
        Global() = delete;
        Global(const Global &) = delete;
        Global(Global &&) = delete;
        Global &operator=(const Global &) = delete;
        Global &operator=(Global &&) = delete;

        static MlqType &GetScheduledMlq(uint coreId) { return scheduledMlqs[coreId]; }
        static MlqType &GetSuggestedMlq(uint coreId) { return suggestedMlqs[coreId]; }

        static void SetThreadRunning(KThread &thread);
        static void SetThreadPaused(KThread &thread);
        static void AdjustThreadPriorityChanged(KThread &thread, uint oldPrio, bool isCurrentThread = false);
        static void AdjustThreadAffinityChanged(KThread &thread, int oldCoreId, u64 oldAffinityMask);
        static void YieldThread(KThread &thread);
        static void YieldThreadAndBalanceLoad(KThread &thread);
        static void YieldThreadAndWaitForLoadBalancing(KThread &thread);

        static void YieldPreemptThread(KThread &currentKernelHandlerThread, uint coreId, uint maxPrio = 59);

        static void SelectThreads();

        static constexpr uint minRegularPriority = 2;
        private:
        static void TransferThreadToCore(KThread &thread, int coreId);
        static void AskForReselectionOrMarkRedundant(KThread *currentThread, KThread *winner);

        // allowSecondPass = true is only used in SelectThreads
        static KThread *PickOneSuggestedThread(const std::array<KThread *, MAX_CORES> &currentThreads,
            uint coreId, bool compareTime = false, bool allowSecondPass = false,
            uint maxPrio = 0, uint minPrio = MlqType::depth - 1);

        static bool reselectionRequired;
        static std::array<MlqType, MAX_CORES> scheduledMlqs, suggestedMlqs;

        template<typename F, typename ...Args>
        static void ApplyReschedulingOperationImpl(F f, KThread &thread, int coreId, u64 affMask, Args&& ...args)
        {
            if (coreId >= 0) {
                f(scheduledMlqs[coreId], thread, std::forward<Args>(args)...);
                affMask &= ~(1ull << coreId);
            }

            while (affMask != 0) {
                coreId = __builtin_ffsll(affMask) - 1;
                f(suggestedMlqs[coreId], thread, std::forward<Args>(args)...);
                affMask &= ~(1ull << coreId);
            }
        }

        template<typename F, typename ...Args>
        static void ApplyReschedulingOperation(F f, KThread &thread, Args&& ...args)
        {
            u64 aff = thread.GetAffinityMask();
            int coreId = thread.GetCurrentCoreId();

            ApplyReschedulingOperationImpl(f, thread, coreId, aff, std::forward<Args>(args)...);

            thread.IncrementSchedulerOperationCount();
            reselectionRequired = true;
        }
    };

    KScheduler() = default;
    KScheduler(const KScheduler &) = delete;
    KScheduler(KScheduler &&) = delete;
    KScheduler &operator=(const KScheduler &) = delete;
    KScheduler &operator=(KScheduler &&) = delete;

    static KCriticalSection &GetCriticalSection() { return criticalSection; }

    static void YieldCurrentThread();
    static void YieldCurrentThreadAndBalanceLoad();
    static void YieldCurrentThreadAndWaitForLoadBalancing();

    void ForceContextSwitch() {}
    void ForceContextSwitchAfterIrq() {}

    void SetContextSwitchNeededForWorkQueue() { isContextSwitchNeededForWorkQueue = true; }

    constexpr ulong GetIdleSelectionCount() const { return idleSelectionCount; }
    constexpr bool IsActive() const { return /*isActive*/ true; } // TODO
    private:
    bool hasContextSwitchStartedAfterIrq;
    bool isActive;
    bool isContextSwitchNeeded;
    bool isContextSwitchNeededForWorkQueue;
    uint coreId;
    u64 lastContextSwitchTime;
    KThread *selectedThread;
    KThread *previousThread;
    ulong idleSelectionCount;

    void *tmpStack;
    KThread *idleThread;

    static KCriticalSection criticalSection;

    template<typename F>
    void DoYieldOperation(F f, KThread &currentThread)
    {
        if (!currentThread.IsSchedulerOperationRedundant())
        {
            criticalSection.lock();
            f(currentThread);
            criticalSection.unlock();
            ForceContextSwitch();
        }
    }
};

}
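ApplyReschedulingOperationImpl touches the scheduled queue of the thread's own core first, then the suggested queue of every other core in its affinity mask, walking set bits with find-first-set. The traversal order, extracted into a checkable sketch (illustrative names):

#include <cassert>
#include <cstdint>
#include <vector>

// Visit the current core first, then every other core allowed by the
// affinity mask, mirroring ApplyReschedulingOperationImpl above.
std::vector<int> visitOrder(int currentCore, std::uint64_t affMask) {
    std::vector<int> visited;
    if (currentCore >= 0) {
        visited.push_back(currentCore);        // scheduled queue of own core
        affMask &= ~(1ull << currentCore);
    }
    while (affMask != 0) {
        int core = __builtin_ffsll(affMask) - 1;
        visited.push_back(core);               // suggested queue of other core
        affMask &= ~(1ull << core);
    }
    return visited;
}

int main() {
    // Thread pinned to cores {0,1,3}, currently on core 1.
    assert((visitOrder(1, 0b1011) == std::vector<int>{1, 0, 3}));
    // Thread not currently assigned to a core: only affinity bits are visited.
    assert((visitOrder(-1, 0b0101) == std::vector<int>{0, 2}));
}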
261
mesosphere/include/mesosphere/threading/KThread.hpp
Normal file

@@ -0,0 +1,261 @@
#pragma once

#include <atomic>
#include <boost/intrusive/list.hpp>
#include <mesosphere/core/util.hpp>
#include <mesosphere/processes/KProcess.hpp>
#include <mesosphere/interfaces/IAlarmable.hpp>
#include <mesosphere/interfaces/ILimitedResource.hpp>

namespace mesosphere
{

struct ThreadWaitListTag;
struct ThreadMutexWaitListTag;
using ThreadWaitListBaseHook = boost::intrusive::list_base_hook<boost::intrusive::tag<ThreadWaitListTag> >;
using ThreadMutexWaitListBaseHook = boost::intrusive::list_base_hook<boost::intrusive::tag<ThreadMutexWaitListTag> >;

class KThread final :
    public KAutoObject,
    public ILimitedResource<KThread>,
    public ISetAllocated<KThread>,
    public IAlarmable,
    public ThreadWaitListBaseHook,
    public ThreadMutexWaitListBaseHook
{
    public:

    MESOSPHERE_AUTO_OBJECT_TRAITS(AutoObject, Thread);
    virtual bool IsAlive() const override;

    virtual void OnAlarm() override;

    struct SchedulerValueTraits {
        using node_traits = boost::intrusive::list_node_traits<KThread *>;
        using node = node_traits::node;
        using node_ptr = node *;
        using const_node_ptr = const node *;
        using value_type = KThread;
        using pointer = KThread *;
        using const_pointer = const KThread *;
        static constexpr boost::intrusive::link_mode_type link_mode = boost::intrusive::normal_link;

        constexpr SchedulerValueTraits(uint coreId) : coreId(coreId) {}
        node_ptr to_node_ptr(value_type &value) const {
            return &value.schedulerNodes[coreId];
        }
        const_node_ptr to_node_ptr(const value_type &value) const {
            return &value.schedulerNodes[coreId];
        }
        pointer to_value_ptr(node_ptr n) const {
            return detail::GetParentFromArrayMember(n, coreId, &KThread::schedulerNodes);
        }
        const_pointer to_value_ptr(const_node_ptr n) const {
            return detail::GetParentFromArrayMember(n, coreId, &KThread::schedulerNodes);
        }

        private:
        uint coreId;
    };

    enum class SchedulingStatus : u16 {
        Paused = 1,
        Running = 2,
        Exited = 3,
    };

    enum class ForcePauseReason : u16 {
        ThreadActivity = 0,
        ProcessActivity = 1,
        Debug = 2,
        Reserved = 3,
        KernelLoading = 4,
    };

    using SchedulerList = typename boost::intrusive::make_list<
        KThread,
        boost::intrusive::value_traits<KThread::SchedulerValueTraits>
    >::type;

    using WaitList = typename boost::intrusive::make_list<
        KThread,
        boost::intrusive::base_hook<ThreadWaitListBaseHook>,
        boost::intrusive::constant_time_size<false>
    >::type;

    private:
    using MutexWaitList = typename boost::intrusive::make_list<
        KThread,
        boost::intrusive::base_hook<ThreadMutexWaitListBaseHook>
    >::type;

    public:

    static constexpr uint GetPriorityOf(const KThread &thread)
    {
        return thread.priority;
    }

    constexpr uint GetPriority() const { return priority; }
    constexpr u64 GetId() const { return id; }
    constexpr int GetCurrentCoreId() const { return currentCoreId; }
    constexpr ulong GetAffinityMask() const { return affinityMask; }
    constexpr long GetLastScheduledTime() const { return lastScheduledTime; }

    KProcess *GetOwner() const { return owner; }
    bool IsSchedulerOperationRedundant() const { return owner != nullptr && owner->GetSchedulerOperationCount() == redundantSchedulerOperationCount; }

    void IncrementSchedulerOperationCount() { if (owner != nullptr) owner->IncrementSchedulerOperationCount(); }
    void SetRedundantSchedulerOperation() { redundantSchedulerOperationCount = owner != nullptr ? owner->GetSchedulerOperationCount() : redundantSchedulerOperationCount; }
    void SetCurrentCoreId(int coreId) { currentCoreId = coreId; }

    void SetProcessLastThreadAndIdleSelectionCount(ulong idleSelectionCount)
    {
        if (owner != nullptr) {
            owner->SetLastThreadAndIdleSelectionCount(this, idleSelectionCount);
        }
    }

    void UpdateLastScheduledTime() { ++lastScheduledTime; /* FIXME */ }

    constexpr SchedulingStatus GetSchedulingStatus() const
    {
        return (SchedulingStatus)(currentSchedMaskFull & 0xF);
    }
    constexpr bool IsForcePausedFor(ForcePauseReason reason) const
    {
        return (schedMaskForForcePauseFull & (1 << (4 + ((ushort)reason)))) != 0;
    }
    constexpr bool IsForcePaused() const
    {
        return (schedMaskForForcePauseFull & ~0xF) != 0;
    }
    static constexpr bool CompareSchedulingStatusFull(ushort fullMask, SchedulingStatus status)
    {
        return fullMask == (ushort)status;
    }
    constexpr bool CompareSchedulingStatusFull(SchedulingStatus status) const
    {
        return CompareSchedulingStatusFull(schedMaskForForcePauseFull, status);
    }

    /// Returns old full mask
    ushort SetSchedulingStatusField(SchedulingStatus status)
    {
        ushort oldMaskFull = currentSchedMaskFull;
        currentSchedMaskFull = (currentSchedMaskFull & ~0xF) | ((ushort)status & 0xF);
        return oldMaskFull;
    }
    void AddForcePauseReasonToField(ForcePauseReason reason)
    {
        schedMaskForForcePauseFull |= 1 << (4 + ((ushort)reason));
    }
    void RemoveForcePauseReasonToField(ForcePauseReason reason)
    {
        // Clear (not set) the reason bit.
        schedMaskForForcePauseFull &= ~(1 << (4 + ((ushort)reason)));
    }

    ushort CommitForcePauseToField()
    {
        ushort oldMaskFull = currentSchedMaskFull;
        currentSchedMaskFull = (schedMaskForForcePauseFull & ~0xF) | (currentSchedMaskFull & 0xF);
        return oldMaskFull;
    }
    ushort RevertForcePauseToField()
    {
        ushort oldMaskFull = currentSchedMaskFull;
        currentSchedMaskFull &= 0xF;
        return oldMaskFull;
    }

    void AdjustScheduling(ushort oldMaskFull);
    void Reschedule(SchedulingStatus newStatus);
    /// Sets status regardless of force-pausing.
    void RescheduleIfStatusEquals(SchedulingStatus expectedStatus, SchedulingStatus newStatus);
    void AddForcePauseReason(ForcePauseReason reason);
    void RemoveForcePauseReason(ForcePauseReason reason);

    bool IsDying() const
    {
        // Or already dead
        /*
            terminationWanted is only set (to true) on exit, under the scheduler critical section,
            and the readers are either a thread under the critical section (most common) or end-of-irq/svc/other exception,
            therefore synchronization outside the critical section can be implemented through fences, I think.
        */
        return CompareSchedulingStatusFull(SchedulingStatus::Exited) || terminationWanted;
    }

    void SetTerminationWanted()
    {
        terminationWanted = true;
        std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    /// Takes effect when critical section is left
    bool WaitForKernelSync(WaitList &waitList);
    /// Takes effect when critical section is left
    void ResumeFromKernelSync();
    /// Takes effect when critical section is left -- all threads in waitlist
    static void ResumeAllFromKernelSync(WaitList &waitList);
    /// Takes effect immediately
    void CancelKernelSync();
    /// Takes effect immediately
    void CancelKernelSync(Result res);

    constexpr size_t GetNumberOfKMutexWaiters() const { return numKernelMutexWaiters; }
    constexpr uiptr GetWantedMutex() const { return wantedMutex; }
    void SetWantedMutex(uiptr mtx) { wantedMutex = mtx; }

    void AddMutexWaiter(KThread &waiter);
    KThread *RelinquishMutex(size_t *count, uiptr mutexAddr);
    void RemoveMutexWaiter(KThread &waiter);
    void InheritDynamicPriority();

    KThread() = default;
    KThread(KProcess *owner, u64 id, uint priority) : KAutoObject(), owner(owner), schedulerNodes(),
        id(id), basePriority(priority), priority(priority),
        currentCoreId(0), affinityMask(15) {}
    private:
    void AddToMutexWaitList(KThread &thread);
    MutexWaitList::iterator RemoveFromMutexWaitList(MutexWaitList::const_iterator it);
    void RemoveFromMutexWaitList(const KThread &t);

    KProcess *owner = nullptr;

    boost::intrusive::list_node_traits<KThread *>::node schedulerNodes[4]{};

    WaitList *currentWaitList = nullptr;

    u64 id = 0;
    long redundantSchedulerOperationCount = 0;
    ushort currentSchedMaskFull = (ushort)SchedulingStatus::Paused;
    ushort schedMaskForForcePauseFull = 0;
    bool terminationWanted = false;
    uint basePriority = 64, priority = 64;
    int currentCoreId = -1;
    ulong affinityMask = 0;

    uiptr wantedMutex = 0;
    KThread *wantedMutexOwner = nullptr;
    MutexWaitList mutexWaitList{};
    size_t numKernelMutexWaiters = 0;

    Handle syncResultHandle{};
    Result syncResult = 0;

    u64 lastScheduledTime = 0;
};

inline void intrusive_ptr_add_ref(KThread *obj)
{
    intrusive_ptr_add_ref((KAutoObject *)obj);
}

inline void intrusive_ptr_release(KThread *obj)
{
    intrusive_ptr_release((KAutoObject *)obj);
}

}
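The scheduling state is split across two 16-bit masks: the low 4 bits of currentSchedMaskFull hold the status, while bits 4 and up of schedMaskForForcePauseFull accumulate force-pause reasons; Commit/RevertForcePauseToField splice the pause bits in and out without disturbing the status. The arithmetic, checked standalone (illustrative):

#include <cassert>
#include <cstdint>

using ushort = std::uint16_t;

enum class Status : ushort { Paused = 1, Running = 2, Exited = 3 };
enum class PauseReason : ushort { ThreadActivity = 0, Debug = 2 };

int main() {
    ushort current = (ushort)Status::Running; // low 4 bits = status
    ushort forcePause = 0;                    // bits 4+ = pending pause reasons

    // AddForcePauseReasonToField
    forcePause |= 1 << (4 + (ushort)PauseReason::Debug);

    // CommitForcePauseToField: splice pause bits in, keep the status bits.
    current = (ushort)((forcePause & ~0xF) | (current & 0xF));
    assert((current & 0xF) == (ushort)Status::Running); // status preserved
    assert((current & ~0xF) != 0);                      // now force-paused

    // RevertForcePauseToField: drop all pause bits, keep the status.
    current &= 0xF;
    assert(current == (ushort)Status::Running);

    // RemoveForcePauseReasonToField: clear the reason bit.
    forcePause &= ~(1 << (4 + (ushort)PauseReason::Debug));
    assert(forcePause == 0);
}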
11
mesosphere/source/core/KCoreContext.cpp
Normal file

@@ -0,0 +1,11 @@
#include <mesosphere/core/KCoreContext.hpp>
#include <mesosphere/threading/KScheduler.hpp>

namespace mesosphere
{

static KScheduler scheds[4];

std::array<KCoreContext, MAX_CORES> KCoreContext::instances{ &scheds[0], &scheds[1], &scheds[2], &scheds[3] };

}
20
mesosphere/source/interfaces/IAlarmable.cpp
Normal file

@@ -0,0 +1,20 @@
#include <mesosphere/interfaces/IAlarmable.hpp>
#include <mesosphere/core/KCoreContext.hpp>
#include <mesosphere/interrupts/KAlarm.hpp>

namespace mesosphere
{

void IAlarmable::SetAlarmTimeImpl(const KSystemClock::time_point &alarmTime)
{
    this->alarmTime = alarmTime;
    KCoreContext::GetCurrentInstance().GetAlarm()->AddAlarmable(*this);
}

void IAlarmable::ClearAlarm()
{
    KCoreContext::GetCurrentInstance().GetAlarm()->RemoveAlarmable(*this);
    alarmTime = KSystemClock::time_point{};
}

}
12
mesosphere/source/interfaces/IInterruptibleWork.cpp
Normal file

@@ -0,0 +1,12 @@
#include <mesosphere/interfaces/IInterruptibleWork.hpp>

namespace mesosphere
{

IWork *IInterruptibleWork::HandleInterrupt(uint interruptId)
{
    (void)interruptId;
    return (IWork *)this;
}

}
26
mesosphere/source/interfaces/ILimitedResource.cpp
Normal file

@@ -0,0 +1,26 @@
#include <mesosphere/interfaces/ILimitedResource.hpp>
#include <mesosphere/processes/KProcess.hpp>
#include <mesosphere/kresources/KResourceLimit.hpp>

namespace mesosphere::detail
{

void ReleaseResource(const SharedPtr<KResourceLimit> &reslimit, KAutoObject::TypeId typeId, size_t count, size_t realCount)
{
    if (reslimit != nullptr) {
        reslimit->Release(KResourceLimit::GetCategory(typeId), count, realCount);
    } else {
        KResourceLimit::GetDefaultInstance().Release(KResourceLimit::GetCategory(typeId), count, realCount);
    }
}

void ReleaseResource(const SharedPtr<KProcess> &owner, KAutoObject::TypeId typeId, size_t count, size_t realCount)
{
    if (owner != nullptr) {
        ReleaseResource(owner->GetResourceLimit(), typeId, count, realCount);
    } else {
        KResourceLimit::GetDefaultInstance().Release(KResourceLimit::GetCategory(typeId), count, realCount);
    }
}

}
58
mesosphere/source/interrupts/KAlarm.cpp
Normal file

@@ -0,0 +1,58 @@
#include <mesosphere/interrupts/KAlarm.hpp>
#include <mesosphere/core/KCoreContext.hpp>
#include <mesosphere/threading/KScheduler.hpp>
#include <mesosphere/arch/KInterruptMaskGuard.hpp>

namespace mesosphere
{

void KAlarm::AddAlarmable(IAlarmable &alarmable)
{
    std::lock_guard guard{spinlock};
    alarmables.insert(alarmable);

    KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime());
}

void KAlarm::RemoveAlarmable(const IAlarmable &alarmable)
{
    std::lock_guard guard{spinlock};
    alarmables.erase(alarmable);

    // Re-arm only if an alarmable remains; dereferencing cbegin() of an empty set would be UB.
    if (!alarmables.empty()) {
        KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime());
    }
}

void KAlarm::HandleAlarm()
{
    {
        KCriticalSection &critsec = KScheduler::GetCriticalSection();
        std::lock_guard criticalSection{critsec};
        std::lock_guard guard{spinlock};

        KSystemClock::SetInterruptMasked(true); // mask timer interrupt
        KSystemClock::time_point currentTime = KSystemClock::now(), maxAlarmTime;
        while (alarmables.begin() != alarmables.end()) {
            IAlarmable &a = *alarmables.begin();
            maxAlarmTime = a.alarmTime;
            if (maxAlarmTime > currentTime) {
                break;
            }

            alarmables.erase(a);
            a.alarmTime = KSystemClock::time_point{};

            a.OnAlarm();
        }

        if (maxAlarmTime > KSystemClock::time_point{}) {
            KSystemClock::SetAlarm(maxAlarmTime);
        }
    }

    {
        // TODO Reenable interrupt 30
        KInterruptMaskGuard guard{};
    }
}

}
45
mesosphere/source/interrupts/KWorkQueue.cpp
Normal file

@@ -0,0 +1,45 @@
#include <mesosphere/interrupts/KWorkQueue.hpp>
#include <mesosphere/core/KCoreContext.hpp>
#include <mesosphere/threading/KScheduler.hpp>
#include <mesosphere/arch/KInterruptMaskGuard.hpp>

namespace mesosphere
{

void KWorkQueue::AddWork(IWork &work)
{
    workQueue.push_back(work);
    KCoreContext::GetCurrentInstance().GetScheduler()->SetContextSwitchNeededForWorkQueue();
}

void KWorkQueue::Initialize()
{
    //handlerThread.reset(new KThread); //TODO!
    kassert(handlerThread == nullptr);
}

void KWorkQueue::HandleWorkQueue()
{
    KCoreContext &cctx = KCoreContext::GetCurrentInstance();
    while (true) {
        IWork *work = nullptr;
        do {
            KInterruptMaskGuard imguard{};
            auto it = workQueue.begin();
            if (it != workQueue.end()) {
                work = &*it;
                workQueue.erase(it);
            } else {
                {
                    //TODO: thread usercontext scheduler/bottom hard guard
                    cctx.GetCurrentThread()->Reschedule(KThread::SchedulingStatus::Paused);
                }
                cctx.GetScheduler()->ForceContextSwitch();
            }
        } while (work == nullptr);

        work->DoWork();
    }
}

}
10
mesosphere/source/kresources/KAutoObject.cpp
Normal file

@@ -0,0 +1,10 @@
#include <mesosphere/kresources/KAutoObject.hpp>

namespace mesosphere
{

KAutoObject::~KAutoObject()
{
}

}
83
mesosphere/source/kresources/KResourceLimit.cpp
Normal file

@@ -0,0 +1,83 @@
#include <mesosphere/kresources/KResourceLimit.hpp>

namespace mesosphere
{

KResourceLimit KResourceLimit::defaultInstance{};

size_t KResourceLimit::GetCurrentValue(KResourceLimit::Category category) const
{
    // Caller should check category
    std::lock_guard guard{condvar.mutex()};
    return currentValues[(uint)category];
}

size_t KResourceLimit::GetLimitValue(KResourceLimit::Category category) const
{
    // Caller should check category
    std::lock_guard guard{condvar.mutex()};
    return limitValues[(uint)category];
}

size_t KResourceLimit::GetRemainingValue(KResourceLimit::Category category) const
{
    // Caller should check category
    std::lock_guard guard{condvar.mutex()};
    return limitValues[(uint)category] - currentValues[(uint)category];
}

bool KResourceLimit::SetLimitValue(KResourceLimit::Category category, size_t value)
{
    std::lock_guard guard{condvar.mutex()};
    if ((long)value < 0 || currentValues[(uint)category] > value) {
        return false;
    } else {
        limitValues[(uint)category] = value;
        return true;
    }
}

void KResourceLimit::Release(KResourceLimit::Category category, size_t count, size_t realCount)
{
    // Caller should ensure parameters are correct
    std::lock_guard guard{condvar.mutex()};
    currentValues[(uint)category] -= count;
    realValues[(uint)category] -= realCount;
    condvar.notify_all();
}

bool KResourceLimit::ReserveDetail(KResourceLimit::Category category, size_t count, const KSystemClock::time_point &timeoutTime)
{
    std::lock_guard guard{condvar.mutex()};
    if ((long)count <= 0 || realValues[(uint)category] >= limitValues[(uint)category]) {
        return false;
    }

    size_t newCur = currentValues[(uint)category] + count;
    bool ok = false;

    auto condition =
        [=, &newCur] {
            newCur = this->currentValues[(uint)category] + count;
            size_t lval = this->limitValues[(uint)category];
            return this->realValues[(uint)category] <= lval && newCur <= lval; // needs to be re-checked under the lock
        };

    if (timeoutTime <= KSystemClock::never) {
        // TODO: check that this is actually < 0
        // TODO: timeout
        ok = true;
        condvar.wait(condition);
    } else {
        ok = condvar.wait_until(timeoutTime, condition);
    }

    if (ok) {
        currentValues[(uint)category] += count;
        realValues[(uint)category] += count;
    }

    return ok;
}

}
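ReserveDetail is the standard "sleep on a condition variable until the reservation fits under the limit" pattern, with Release notifying all waiters. A user-space analogue using std::condition_variable (illustrative names; the kernel version additionally tracks a separate "real" usage value and a never-timeout sentinel):

#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <thread>

// Blocking reserve/release against a single limit, the shape of
// KResourceLimit::ReserveDetail and Release above.
class Limiter {
    std::mutex m;
    std::condition_variable cv;
    std::size_t current = 0, limit;

public:
    explicit Limiter(std::size_t limit) : limit(limit) {}

    void reserve(std::size_t count) {
        std::unique_lock lk{m};
        cv.wait(lk, [&] { return current + count <= limit; }); // re-checked under the lock
        current += count;
    }

    void release(std::size_t count) {
        { std::lock_guard lk{m}; current -= count; }
        cv.notify_all(); // wake reservers so they re-evaluate the predicate
    }
};

int main() {
    Limiter lim{2};
    lim.reserve(2);
    std::thread t([&] { lim.reserve(1); lim.release(1); }); // blocks until space
    lim.release(2);
    t.join();
}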
1150
mesosphere/source/my_libc.c
Normal file
(File diff suppressed because it is too large.)
6
mesosphere/source/my_libstdc++.cpp
Normal file

@@ -0,0 +1,6 @@
#include <cstddef>

void *operator new(std::size_t) { for (;;); }
void *operator new[](std::size_t) { for (;;); }
void operator delete(void *) { }
void operator delete[](void *) { }
133
mesosphere/source/processes/KHandleTable.cpp
Normal file

@@ -0,0 +1,133 @@
#include <mutex>
#include <algorithm>
#include <mesosphere/processes/KHandleTable.hpp>
#include <mesosphere/core/KCoreContext.hpp>
#include <mesosphere/threading/KThread.hpp>

namespace mesosphere
{

bool KHandleTable::IsValid(Handle handle) const
{
    // The official kernel checks for nullptr, but that makes the deferred-init logic more difficult.
    // We use our own, more secure, logic instead: free entries have ids <= 0.
    return handle.index < capacity && handle.id > 0 && entries[handle.index].id == handle.id;
}

SharedPtr<KAutoObject> KHandleTable::GetAutoObject(Handle handle) const
{
    if (handle.IsAliasOrFree()) {
        // Note: the official kernel takes the spinlock here as well, but we don't need to.
        return nullptr;
    } else {
        std::lock_guard guard{spinlock};
        return IsValid(handle) ? entries[handle.index].object : nullptr;
    }
}

SharedPtr<KThread> KHandleTable::GetThread(Handle handle, bool allowAlias) const
{
    if (allowAlias && handle == selfThreadAlias) {
        return KCoreContext::GetCurrentInstance().GetCurrentThread();
    } else {
        return DynamicObjectCast<KThread>(GetAutoObject(handle));
    }
}

SharedPtr<KProcess> KHandleTable::GetProcess(Handle handle, bool allowAlias) const
{
    if (allowAlias && handle == selfProcessAlias) {
        return KCoreContext::GetCurrentInstance().GetCurrentProcess();
    } else {
        return DynamicObjectCast<KProcess>(GetAutoObject(handle));
    }
}

bool KHandleTable::Close(Handle handle)
{
    SharedPtr<KAutoObject> tmp{nullptr}; // ensure any potential dtor is called w/o the spinlock being held

    if (handle.IsAliasOrFree()) {
        return false;
    } else {
        std::lock_guard guard{spinlock};
        if (IsValid(handle)) {
            // Push the freed slot onto the free list (links are stored as negated indices).
            entries[handle.index].id = firstFreeIndex;
            firstFreeIndex = -(s16)handle.index;
            --numActive;
            tmp = std::move(entries[handle.index].object);
            return true;
        } else {
            return false;
        }
    }
}

bool KHandleTable::Generate(Handle &out, SharedPtr<KAutoObject> obj)
{
    // Note: nullptr is accepted, for deferred-init.

    std::lock_guard guard{spinlock};
    if (numActive >= capacity) {
        return false; // caller should return 0xD201
    }

    // Get/allocate the entry
    u16 index = (u16)-firstFreeIndex;
    Entry *e = &entries[index];
    firstFreeIndex = e->id;

    e->id = idCounter;
    e->object = std::move(obj);

    out.index = index;
    out.id = e->id;
    out.isAlias = false;

    size = ++numActive > size ? numActive : size;
    idCounter = idCounter == 0x7FFF ? 1 : idCounter + 1;

    return true;
}

bool KHandleTable::Set(SharedPtr<KAutoObject> obj, Handle handle)
{
    std::lock_guard guard{spinlock};
    if (!handle.IsAliasOrFree() && IsValid(handle)) {
        entries[handle.index].object = std::move(obj);
        return true;
    } else {
        return false;
    }
}

void KHandleTable::Destroy()
{
    spinlock.lock();
    u16 capa = capacity;
    capacity = 0;
    firstFreeIndex = 0;
    spinlock.unlock();

    for (u16 i = 0; i < capa; i++) {
        entries[i].object = nullptr;
        entries[i].id = -(i + 1);
    }
}

KHandleTable::KHandleTable(size_t capacity_) : capacity((u16)capacity_)
{
    // Note: caller should check the > case, and return an error in that case!
    capacity = capacity > capacityLimit || capacity == 0 ? (u16)capacityLimit : capacity;

    u16 capa = capacity;
    Destroy();
    capacity = capa;
}

KHandleTable::~KHandleTable()
{
    Destroy();
}

}
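The handle table's free list needs no extra storage: Entry::id doubles as the link, holding the negated index (or zero) of the next free slot, so any id <= 0 reads as invalid. The push/pop arithmetic used by Generate and Close, in a standalone sketch with illustrative sizes:

#include <cassert>
#include <cstdint>

using s16 = std::int16_t;
using u16 = std::uint16_t;

constexpr u16 N = 4;
s16 ids[N];            // Entry::id: > 0 live, <= 0 free-list link
s16 firstFree = 0;     // negated index of the first free slot

void reset() {
    firstFree = 0;
    for (u16 i = 0; i < N; i++) ids[i] = -(s16)(i + 1); // chain 0 -> 1 -> 2 -> 3
}

u16 alloc(s16 newId) {             // Generate's pop
    u16 index = (u16)-firstFree;
    firstFree = ids[index];        // follow the link
    ids[index] = newId;
    return index;
}

void free_(u16 index) {            // Close's push
    ids[index] = firstFree;        // link to old head
    firstFree = -(s16)index;
}

int main() {
    reset();
    u16 a = alloc(10), b = alloc(11);
    assert(a == 0 && b == 1);
    free_(a);
    assert(ids[a] <= 0);           // freed slots read as invalid
    assert(alloc(12) == a);        // LIFO reuse of the freed slot
    assert(alloc(13) == 2);
    (void)b;
}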
14
mesosphere/source/processes/KProcess.cpp
Normal file

@@ -0,0 +1,14 @@
#include <mesosphere/processes/KProcess.hpp>
#include <mesosphere/threading/KThread.hpp>
#include <mesosphere/kresources/KResourceLimit.hpp>

namespace mesosphere
{

void KProcess::SetLastThreadAndIdleSelectionCount(KThread *thread, ulong idleSelectionCount)
{
    lastThreads[thread->GetCurrentCoreId()] = thread;
    lastIdleSelectionCount[thread->GetCurrentCoreId()] = idleSelectionCount;
}

}
10
mesosphere/source/test.cpp
Normal file

@@ -0,0 +1,10 @@
int main(void) {
    for (;;);
    return 0;
}

extern "C" {
void _start(void) {
    main();
}
}
39
mesosphere/source/threading/KConditionVariable.cpp
Normal file

@@ -0,0 +1,39 @@
#include <mesosphere/threading/KConditionVariable.hpp>
#include <mesosphere/threading/KScheduler.hpp>
#include <mesosphere/core/KCoreContext.hpp>

namespace mesosphere
{

void KConditionVariable::wait_until_impl(const KSystemClock::time_point &timeoutPoint) noexcept
{
    // The official kernel counts the number of waiters, but that isn't necessary
    {
        KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread();
        std::lock_guard guard{KScheduler::GetCriticalSection()};
        mutex_.unlock();
        if (currentThread->WaitForKernelSync(waiterList)) {
            (void)timeoutPoint; //TODO!
        } else {
            // Termination
        }
    }
    mutex_.lock();
}

void KConditionVariable::notify_one() noexcept
{
    std::lock_guard guard{KScheduler::GetCriticalSection()};
    auto t = waiterList.begin();
    if (t != waiterList.end()) {
        t->ResumeFromKernelSync();
    }
}

void KConditionVariable::notify_all() noexcept
{
    std::lock_guard guard{KScheduler::GetCriticalSection()};
    KThread::ResumeAllFromKernelSync(waiterList);
}

}
62
mesosphere/source/threading/KMutex.cpp
Normal file

@@ -0,0 +1,62 @@
#include <mesosphere/threading/KMutex.hpp>
#include <mesosphere/threading/KThread.hpp>
#include <mesosphere/threading/KScheduler.hpp>

namespace mesosphere
{

void KMutex::lock_slow_path(KThread &owner, KThread &requester)
{
    // The requester is the current thread most (all?) of the time
    KCriticalSection &critsec = KScheduler::GetCriticalSection();
    std::lock_guard criticalSection{critsec};
    if (KCoreContext::GetCurrentInstance().GetScheduler()->IsActive()) {
        requester.SetWantedMutex((uiptr)this);
        owner.AddMutexWaiter(requester);

        // If the requester is/was running, pause it (sets status even if force-paused).
        requester.RescheduleIfStatusEquals(KThread::SchedulingStatus::Running, KThread::SchedulingStatus::Paused);

        // If the owner is force-paused, temporarily wake it.
        if (owner.IsForcePaused()) {
            owner.AdjustScheduling(owner.RevertForcePauseToField());
        }

        // Commit scheduler changes NOW.
        critsec.unlock();
        critsec.lock();

        /*
            At this point, mutex ownership has been transferred to the requester or another thread (false wake).
            Make sure the requester, now resumed, isn't in any mutex wait list.
        */
        owner.RemoveMutexWaiter(requester);
    }
}

void KMutex::unlock_slow_path(KThread &owner)
{
    std::lock_guard criticalSection{KScheduler::GetCriticalSection()};
    size_t count;
    KThread *newOwner = owner.RelinquishMutex(&count, (uiptr)this);
    native_handle_type newTag;

    if (newOwner != nullptr) {
        // Wake up the new owner
        newTag = (native_handle_type)newOwner | (count > 1 ? 1 : 0);
        // Sets status even if force-paused.
        newOwner->RescheduleIfStatusEquals(KThread::SchedulingStatus::Paused, KThread::SchedulingStatus::Running);
    } else {
        // Free the mutex.
        newTag = 0;
    }

    // Allow the previous owner to go back to forced sleep, if no other thread wants the kmutexes it is holding.
    if (!owner.IsDying() && owner.GetNumberOfKMutexWaiters() == 0) {
        owner.AdjustScheduling(owner.CommitForcePauseToField());
    }

    tag = newTag;
}

}
344
mesosphere/source/threading/KScheduler.cpp
Normal file

@@ -0,0 +1,344 @@
#include <algorithm>
|
||||
#include <atomic>
|
||||
|
||||
#include <mesosphere/threading/KScheduler.hpp>
|
||||
#include <mesosphere/core/KCoreContext.hpp>
|
||||
|
||||
namespace mesosphere
|
||||
{
|
||||
|
||||
namespace {
|
||||
struct MlqTraitsFactory {
|
||||
constexpr KThread::SchedulerValueTraits operator()(size_t i) const
|
||||
{
|
||||
return KThread::SchedulerValueTraits{(uint)i};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
using MlqT = KScheduler::Global::MlqType;
|
||||
|
||||
bool KScheduler::Global::reselectionRequired = false;
|
||||
|
||||
std::array<MlqT, MAX_CORES> KScheduler::Global::scheduledMlqs =
|
||||
detail::MakeArrayWithFactorySequenceOf<MlqT, MlqTraitsFactory, MAX_CORES>(
|
||||
&KThread::GetPriorityOf
|
||||
);
|
||||
|
||||
std::array<MlqT, MAX_CORES> KScheduler::Global::suggestedMlqs =
|
||||
detail::MakeArrayWithFactorySequenceOf<MlqT, MlqTraitsFactory, MAX_CORES>(
|
||||
&KThread::GetPriorityOf
|
||||
);
|
||||
|
||||
|
||||
void KScheduler::Global::SetThreadRunning(KThread &thread)
|
||||
{
|
||||
ApplyReschedulingOperation([](MlqT &mlq, KThread &t){ mlq.add(t); }, thread);
|
||||
}
|
||||
|
||||
void KScheduler::Global::SetThreadPaused(KThread &thread)
|
||||
{
|
||||
ApplyReschedulingOperation([](MlqT &mlq, KThread &t){ mlq.remove(t); }, thread);
|
||||
}
|
||||
|
||||
void KScheduler::Global::AdjustThreadPriorityChanged(KThread &thread, uint oldPrio, bool isCurrentThread)
|
||||
{
|
||||
ApplyReschedulingOperation(
|
||||
[oldPrio, isCurrentThread](MlqT &mlq, KThread &t){
|
||||
mlq.adjust(t, oldPrio, isCurrentThread);
|
||||
}, thread);
|
||||
}
|
||||
|
||||
void KScheduler::Global::AdjustThreadAffinityChanged(KThread &thread, int oldCoreId, u64 oldAffinityMask)
|
||||
{
|
||||
int newCoreId = thread.GetCurrentCoreId();
|
||||
u64 newAffinityMask = thread.GetAffinityMask();
|
||||
|
||||
ApplyReschedulingOperationImpl([](MlqT &mlq, KThread &t){ mlq.remove(t); }, thread, oldCoreId, oldAffinityMask);
|
||||
ApplyReschedulingOperationImpl([](MlqT &mlq, KThread &t){ mlq.add(t); }, thread, newCoreId, newAffinityMask);
|
||||
|
||||
thread.IncrementSchedulerOperationCount();
|
||||
reselectionRequired = true;
|
||||
}

void KScheduler::Global::TransferThreadToCore(KThread &thread, int coreId)
{
    int currentCoreId = thread.GetCurrentCoreId();

    if (currentCoreId != coreId) {
        if (currentCoreId != -1) {
            scheduledMlqs[currentCoreId].transferToBack(thread, suggestedMlqs[currentCoreId]);
        }

        if (coreId != -1) {
            suggestedMlqs[coreId].transferToFront(thread, scheduledMlqs[coreId]);
        }
    }

    thread.SetCurrentCoreId(coreId);
}

void KScheduler::Global::AskForReselectionOrMarkRedundant(KThread *currentThread, KThread *winner)
{
    if (currentThread == winner) {
        // Note: the official kernel (not us) has a null-dereference bug on currentThread->owner here, but it is never triggered.
        currentThread->SetRedundantSchedulerOperation();
    } else {
        reselectionRequired = true;
    }
}

KThread *KScheduler::Global::PickOneSuggestedThread(const std::array<KThread *, MAX_CORES> &curThreads,
    uint coreId, bool compareTime, bool allowSecondPass, uint maxPrio, uint minPrio) {
    if (minPrio < maxPrio) {
        return nullptr;
    }

    auto hasWorseTime = [coreId, minPrio, compareTime](const KThread &t) {
        if (!compareTime || scheduledMlqs[coreId].size(minPrio) <= 1 || t.GetPriority() < minPrio) {
            return false;
        } else {
            // True if the candidate would have been scheduled again only after the
            // thread currently at the front of the minPrio level.
            return t.GetLastScheduledTime() > scheduledMlqs[coreId].front(minPrio).GetLastScheduledTime();
        }
    };

    std::array<uint, MAX_CORES> secondPassCores;
    size_t numSecondPassCores = 0;

    auto it = std::find_if(
        suggestedMlqs[coreId].begin(maxPrio),
        suggestedMlqs[coreId].end(minPrio),
        [&hasWorseTime, &secondPassCores, &numSecondPassCores, &curThreads](const KThread &t) {
            int srcCoreId = t.GetCurrentCoreId();
            // bool worseTime = compareTime && hasWorseTime(t);
            // Note: the official kernel also stops the search when worseTime holds.
            if (srcCoreId >= 0) {
                bool srcHasEphemeralKernThread = scheduledMlqs[srcCoreId].highestPrioritySet() < minRegularPriority;
                bool isSrcCurT = &t == curThreads[srcCoreId];
                if (isSrcCurT) {
                    secondPassCores[numSecondPassCores++] = (uint)srcCoreId;
                }

                // Note: when compareTime is set, the official kernel stops the search if
                // srcHasEphemeralKernThread; we believe this is a bug.
                if (srcHasEphemeralKernThread || isSrcCurT) {
                    return false;
                }
            }

            return true;
        }
    );

    if (it != suggestedMlqs[coreId].end(minPrio) && (!compareTime || !hasWorseTime(*it))) {
        return &*it;
    } else if (allowSecondPass) {
        // Allow re-picking a thread that is about to become current, as long as taking it doesn't leave its core idle.
        auto srcCoreIdPtr = std::find_if(
            secondPassCores.cbegin(),
            secondPassCores.cbegin() + numSecondPassCores,
            [](uint id) {
                return scheduledMlqs[id].highestPrioritySet() >= minRegularPriority && scheduledMlqs[id].size() > 1;
            }
        );

        return srcCoreIdPtr == secondPassCores.cbegin() + numSecondPassCores ? nullptr : &scheduledMlqs[*srcCoreIdPtr].front();
    } else {
        return nullptr;
    }
}
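
// Note for readers: in this scheduler a *smaller* number is a *higher*
// priority, so the (maxPrio, minPrio) pair above is a numeric interval and
// the early "minPrio < maxPrio" guard rejects an empty range. A minimal
// analogue of that contract (the names are assumptions for this example):

namespace prio_range_example {

// Valid iff maxPrio (highest allowed priority, smallest number) does not
// exceed minPrio (lowest allowed priority, largest number).
inline bool IsValidRange(unsigned maxPrio, unsigned minPrio) {
    return maxPrio <= minPrio;
}

}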

void KScheduler::Global::YieldThread(KThread &currentThread)
{
    // Note: the caller is expected to hold the scheduler critical section, etc.
    kassert(currentThread.GetCurrentCoreId() >= 0);
    uint coreId = (uint)currentThread.GetCurrentCoreId();
    uint priority = currentThread.GetPriority();

    // Yield the thread
    scheduledMlqs[coreId].yield(currentThread);
    currentThread.IncrementSchedulerOperationCount();

    KThread *winner = &scheduledMlqs[coreId].front(priority);
    AskForReselectionOrMarkRedundant(&currentThread, winner);
}
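
// Illustrative sketch (not part of this file): the "yield" primitive rotates
// the caller to the back of its own priority level, so the next thread at the
// same priority becomes the new front, and hence the winner above. std::list
// stands in for the kernel's intrusive list; this is an assumption for the
// example.
#include <list>

namespace yield_example {

template <typename T>
void Yield(std::list<T> &level) {
    if (level.size() > 1) {
        level.splice(level.end(), level, level.begin());  // move front to back
    }
}

}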

void KScheduler::Global::YieldThreadAndBalanceLoad(KThread &currentThread)
{
    // Note: the caller should check !currentThread.IsSchedulerOperationRedundant and hold the critical section, etc.
    kassert(currentThread.GetCurrentCoreId() >= 0);
    uint coreId = (uint)currentThread.GetCurrentCoreId();
    uint priority = currentThread.GetPriority();

    std::array<KThread *, MAX_CORES> curThreads;
    for (uint i = 0; i < MAX_CORES; i++) {
        curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front();
    }

    // Yield the thread
    scheduledMlqs[coreId].yield(currentThread);
    currentThread.IncrementSchedulerOperationCount();

    KThread *winner = PickOneSuggestedThread(curThreads, coreId, true, false, 0, priority);

    if (winner != nullptr) {
        TransferThreadToCore(*winner, coreId);
        winner->IncrementSchedulerOperationCount();
        currentThread.SetRedundantSchedulerOperation();
    } else {
        winner = &scheduledMlqs[coreId].front(priority);
    }

    AskForReselectionOrMarkRedundant(&currentThread, winner);
}

void KScheduler::Global::YieldThreadAndWaitForLoadBalancing(KThread &currentThread)
{
    // Note: the caller should check !currentThread.IsSchedulerOperationRedundant and hold the critical section, etc.
    KThread *winner = nullptr;
    kassert(currentThread.GetCurrentCoreId() >= 0);
    uint coreId = (uint)currentThread.GetCurrentCoreId();

    // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
    TransferThreadToCore(currentThread, -1);
    currentThread.IncrementSchedulerOperationCount();

    // If the core is idle, perform load balancing, excluding the threads that have just used this function...
    if (scheduledMlqs[coreId].empty()) {
        // Here, "curThreads" is computed after the "yield", unlike in YieldThreadAndBalanceLoad (yield -1)
        std::array<KThread *, MAX_CORES> curThreads;
        for (uint i = 0; i < MAX_CORES; i++) {
            curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front();
        }

        winner = PickOneSuggestedThread(curThreads, coreId, false);

        if (winner != nullptr) {
            TransferThreadToCore(*winner, coreId);
            winner->IncrementSchedulerOperationCount();
        } else {
            winner = &currentThread;
        }
    }

    AskForReselectionOrMarkRedundant(&currentThread, winner);
}

void KScheduler::Global::YieldPreemptThread(KThread &currentKernelHandlerThread, uint coreId, uint maxPrio)
{
    if (!scheduledMlqs[coreId].empty(maxPrio)) {
        // Yield the first thread in the level queue
        scheduledMlqs[coreId].front(maxPrio).IncrementSchedulerOperationCount();
        scheduledMlqs[coreId].yield(maxPrio);
        if (scheduledMlqs[coreId].size() > 1) {
            scheduledMlqs[coreId].front(maxPrio).IncrementSchedulerOperationCount();
        }
    }

    // Here, "curThreads" is computed after the forced yield, unlike in YieldThreadAndBalanceLoad (yield -1)
    std::array<KThread *, MAX_CORES> curThreads;
    for (uint i = 0; i < MAX_CORES; i++) {
        curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front();
    }

    KThread *winner = PickOneSuggestedThread(curThreads, coreId, true, false, maxPrio, maxPrio);
    if (winner != nullptr) {
        TransferThreadToCore(*winner, coreId);
        winner->IncrementSchedulerOperationCount();
    }

    for (uint i = 0; i < MAX_CORES; i++) {
        curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front();
    }

    // Find the first thread that is not the kernel handler thread.
    auto itFirst = std::find_if(
        scheduledMlqs[coreId].begin(),
        scheduledMlqs[coreId].end(),
        [&currentKernelHandlerThread](const KThread &t) {
            return &t != &currentKernelHandlerThread;
        }
    );

    if (itFirst != scheduledMlqs[coreId].end()) {
        // If under the threshold, do load balancing again
        winner = PickOneSuggestedThread(curThreads, coreId, true, false, maxPrio, itFirst->GetPriority() - 1);
        if (winner != nullptr) {
            TransferThreadToCore(*winner, coreId);
            winner->IncrementSchedulerOperationCount();
        }
    }

    reselectionRequired = true;
}

void KScheduler::Global::SelectThreads()
{
    auto updateThread = [](KThread *thread, KScheduler &sched) {
        if (thread != sched.selectedThread) {
            if (thread != nullptr) {
                thread->IncrementSchedulerOperationCount();
                thread->UpdateLastScheduledTime();
                thread->SetProcessLastThreadAndIdleSelectionCount(sched.idleSelectionCount);
            } else {
                ++sched.idleSelectionCount;
            }
            sched.selectedThread = thread;
            sched.isContextSwitchNeeded = true;
        }
        std::atomic_thread_fence(std::memory_order_seq_cst);
    };

    // This maintains the "current thread is at the front of the queue" invariant
    std::array<KThread *, MAX_CORES> curThreads;
    for (uint i = 0; i < MAX_CORES; i++) {
        KScheduler &sched = *KCoreContext::GetInstance(i).GetScheduler();
        curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front();
        updateThread(curThreads[i], sched);
    }

    // Do some load-balancing. Allow a second pass.
    std::array<KThread *, MAX_CORES> curThreads2 = curThreads;
    for (uint i = 0; i < MAX_CORES; i++) {
        if (scheduledMlqs[i].empty()) {
            KThread *winner = PickOneSuggestedThread(curThreads2, i, false, true);
            if (winner != nullptr) {
                curThreads2[i] = winner;
                TransferThreadToCore(*winner, i);
                winner->IncrementSchedulerOperationCount();
            }
        }
    }

    // See which to-be-current threads have changed, and update accordingly
    for (uint i = 0; i < MAX_CORES; i++) {
        KScheduler &sched = *KCoreContext::GetInstance(i).GetScheduler();
        if (curThreads2[i] != curThreads[i]) {
            updateThread(curThreads2[i], sched);
        }
    }
    reselectionRequired = false;
}
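
// Illustrative sketch (not part of this file): why the seq_cst fence sits at
// the end of updateThread above. Each core's new selection must be fully
// published before other cores act on isContextSwitchNeeded. The names below
// are assumptions made for this example.
#include <atomic>

namespace fence_example {

struct PerCore {
    std::atomic<bool> isContextSwitchNeeded{false};
};

inline void PublishSelection(PerCore &core) {
    core.isContextSwitchNeeded.store(true, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);  // order the publication
}

}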

KCriticalSection KScheduler::criticalSection{};

void KScheduler::YieldCurrentThread()
{
    KCoreContext &cctx = KCoreContext::GetCurrentInstance();
    cctx.GetScheduler()->DoYieldOperation(Global::YieldThread, *cctx.GetCurrentThread());
}

void KScheduler::YieldCurrentThreadAndBalanceLoad()
{
    KCoreContext &cctx = KCoreContext::GetCurrentInstance();
    cctx.GetScheduler()->DoYieldOperation(Global::YieldThreadAndBalanceLoad, *cctx.GetCurrentThread());
}

void KScheduler::YieldCurrentThreadAndWaitForLoadBalancing()
{
    KCoreContext &cctx = KCoreContext::GetCurrentInstance();
    cctx.GetScheduler()->DoYieldOperation(Global::YieldThreadAndWaitForLoadBalancing, *cctx.GetCurrentThread());
}

}
237
mesosphere/source/threading/KThread.cpp
Normal file
@ -0,0 +1,237 @@
#include <mutex>
#include <atomic>
#include <algorithm>

#include <mesosphere/threading/KThread.hpp>
#include <mesosphere/threading/KScheduler.hpp>
#include <mesosphere/core/KCoreContext.hpp>

namespace mesosphere
{

bool KThread::IsAlive() const
{
    return true;
}

void KThread::OnAlarm()
{
    CancelKernelSync();
}

void KThread::AdjustScheduling(ushort oldMaskFull)
{
    if (currentSchedMaskFull == oldMaskFull) {
        return;
    } else if (CompareSchedulingStatusFull(oldMaskFull, SchedulingStatus::Running)) {
        KScheduler::Global::SetThreadPaused(*this);
    } else if (CompareSchedulingStatusFull(SchedulingStatus::Running)) {
        KScheduler::Global::SetThreadRunning(*this);
    }
}
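
// Illustrative sketch (not part of this file): AdjustScheduling only touches
// the run queues on the two edges Running -> not-Running and
// not-Running -> Running; every other mask change is queue-neutral. The
// types below are assumptions made for this example.

namespace sched_edge_example {

enum class Status { Paused, Running };

struct QueueOps { bool pause; bool resume; };

inline QueueOps Classify(Status oldStatus, Status newStatus) {
    return QueueOps{
        oldStatus == Status::Running && newStatus != Status::Running,  // -> SetThreadPaused
        oldStatus != Status::Running && newStatus == Status::Running,  // -> SetThreadRunning
    };
}

}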

void KThread::Reschedule(KThread::SchedulingStatus newStatus)
{
    std::lock_guard criticalSection{KScheduler::GetCriticalSection()};
    AdjustScheduling(SetSchedulingStatusField(newStatus));
}

void KThread::RescheduleIfStatusEquals(SchedulingStatus expectedStatus, SchedulingStatus newStatus)
{
    if (GetSchedulingStatus() == expectedStatus) {
        Reschedule(newStatus);
    }
}

void KThread::AddForcePauseReason(KThread::ForcePauseReason reason)
{
    std::lock_guard criticalSection{KScheduler::GetCriticalSection()};

    if (!IsDying()) {
        AddForcePauseReasonToField(reason);
        if (numKernelMutexWaiters == 0) {
            AdjustScheduling(CommitForcePauseToField());
        }
    }
}

void KThread::RemoveForcePauseReason(KThread::ForcePauseReason reason)
{
    std::lock_guard criticalSection{KScheduler::GetCriticalSection()};

    if (!IsDying()) {
        RemoveForcePauseReasonToField(reason);
        if (!IsForcePaused() && numKernelMutexWaiters == 0) {
            AdjustScheduling(CommitForcePauseToField());
        }
    }
}

bool KThread::WaitForKernelSync(KThread::WaitList &waitList)
{
    // Has to be called from within the critical section
    currentWaitList = &waitList;
    Reschedule(SchedulingStatus::Paused);
    waitList.push_back(*this);
    if (IsDying()) {
        // The thread is dying: undo the wait immediately.
        ResumeFromKernelSync();
        return false;
    }

    return true;
}
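
// Illustrative caller pattern (an assumption for this example; the scheduler
// critical section is held, and the names are as in this file):
//
//     if (currentThread->WaitForKernelSync(waitList)) {
//         // Paused and enqueued; execution resumes here once a waker calls
//         // ResumeFromKernelSync() or ResumeAllFromKernelSync().
//     } else {
//         // The thread is dying; it was removed from the list immediately.
//     }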

void KThread::ResumeFromKernelSync()
{
    // Has to be called from within the critical section
    currentWaitList->erase(currentWaitList->iterator_to(*this));
    currentWaitList = nullptr;
    Reschedule(SchedulingStatus::Running);
}

void KThread::ResumeAllFromKernelSync(KThread::WaitList &waitList)
{
    // Has to be called from within the critical section
    waitList.clear_and_dispose(
        [](KThread *t) {
            t->currentWaitList = nullptr;
            t->Reschedule(SchedulingStatus::Running);
        }
    );
}

void KThread::CancelKernelSync()
{
    std::lock_guard criticalSection{KScheduler::GetCriticalSection()};
    if (GetSchedulingStatus() == SchedulingStatus::Paused) {
        // Note: transparent to force-pause
        if (currentWaitList != nullptr) {
            ResumeFromKernelSync();
        } else {
            Reschedule(SchedulingStatus::Running);
        }
    }
}

void KThread::CancelKernelSync(Result res)
{
    syncResult = res;
    CancelKernelSync();
}

void KThread::AddToMutexWaitList(KThread &thread)
{
    // TODO: check&increment numKernelMutexWaiters
    // Ordered list insertion, keeping the highest-priority waiters first
    auto it = std::find_if(
        mutexWaitList.begin(),
        mutexWaitList.end(),
        [&thread](const KThread &t) {
            return t.GetPriority() > thread.GetPriority();
        }
    );

    if (it != mutexWaitList.end()) {
        mutexWaitList.insert(it, thread);
    } else {
        mutexWaitList.push_back(thread);
    }
}
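
// Self-contained analogue (not part of this file) of the ordered insert
// above: waiters stay sorted by ascending priority value, i.e. the
// highest-priority waiter first. The types are assumptions for this example;
// std::list stands in for the kernel's intrusive list.
#include <algorithm>
#include <list>

namespace waitlist_example {

struct Thread { unsigned priority; };

inline void AddWaiter(std::list<Thread *> &waiters, Thread &thread) {
    auto it = std::find_if(waiters.begin(), waiters.end(),
        [&thread](const Thread *t) { return t->priority > thread.priority; });
    waiters.insert(it, &thread);  // insert at end() degenerates to push_back
}

}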

KThread::MutexWaitList::iterator KThread::RemoveFromMutexWaitList(KThread::MutexWaitList::const_iterator it)
{
    // TODO: check&decrement numKernelMutexWaiters
    return mutexWaitList.erase(it);
}

void KThread::RemoveFromMutexWaitList(const KThread &t)
{
    RemoveFromMutexWaitList(mutexWaitList.iterator_to(t));
}

void KThread::InheritDynamicPriority()
{
    /*
        Perform priority inheritance.

        Since we may be changing the priority of this thread,
        we must walk the entire chain of mutex owners.
        The following invariant must be preserved:
        a thread holding a mutex must have a higher-or-equal priority than
        all threads waiting for it to release the mutex.
    */

    // Note: each step must read and write the fields of t, the current link in the chain.
    for (KThread *t = this; t != nullptr; t = t->wantedMutexOwner) {
        uint newPrio, oldPrio = t->priority;
        if (!t->mutexWaitList.empty() && t->mutexWaitList.front().priority < t->basePriority) {
            newPrio = t->mutexWaitList.front().priority;
        } else {
            newPrio = t->basePriority;
        }

        if (newPrio == oldPrio) {
            break;
        } else {
            // Update everything that depends on the dynamic priority:

            // TODO update condvar
            // TODO update ctr arbiter
            t->priority = newPrio;
            // TODO update condvar
            // TODO update ctr arbiter
            if (t->CompareSchedulingStatusFull(SchedulingStatus::Running)) {
                KScheduler::Global::AdjustThreadPriorityChanged(*t, oldPrio, t == KCoreContext::GetCurrentInstance().GetCurrentThread());
            }

            if (t->wantedMutexOwner != nullptr) {
                t->wantedMutexOwner->RemoveFromMutexWaitList(*t);
                t->wantedMutexOwner->AddToMutexWaitList(*t);
            }
        }
    }
}
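
// Worked example (not part of this file): thread A (priority 40) blocks on a
// mutex held by B (base priority 50); B's dynamic priority must be raised to
// 40. If B itself waits on a mutex held by C, the walk continues through C.
// A minimal analogue with assumed types; UINT_MAX stands in for "no waiters".
#include <algorithm>
#include <climits>

namespace inherit_example {

struct Thread {
    unsigned basePriority;
    unsigned priority;             // dynamic priority
    Thread *wantedMutexOwner;      // next link in the owner chain
    unsigned bestWaiterPriority;   // front of the mutex wait list, UINT_MAX if empty
};

inline void Inherit(Thread &start) {
    for (Thread *t = &start; t != nullptr; t = t->wantedMutexOwner) {
        unsigned newPrio = std::min(t->basePriority, t->bestWaiterPriority);
        if (newPrio == t->priority) {
            break;                 // the rest of the chain is already consistent
        }
        t->priority = newPrio;
    }
}

}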

void KThread::AddMutexWaiter(KThread &waiter)
{
    AddToMutexWaitList(waiter);
    InheritDynamicPriority();
}

void KThread::RemoveMutexWaiter(KThread &waiter)
{
    RemoveFromMutexWaitList(waiter);
    InheritDynamicPriority();
}

KThread *KThread::RelinquishMutex(size_t *count, uiptr mutexAddr)
{
    KThread *newOwner = nullptr;
    *count = 0;

    // The first thread in the list waiting on mutexAddr becomes the owner; the rest are transferred to it
    for (auto it = mutexWaitList.begin(); it != mutexWaitList.end(); ) {
        if (it->wantedMutex != mutexAddr) {
            ++it;
            continue;
        } else {
            KThread &t = *it;
            ++(*count);
            it = RemoveFromMutexWaitList(it);
            if (newOwner == nullptr) {
                newOwner = &t;
            } else {
                newOwner->AddToMutexWaitList(t);
            }
        }
    }

    // The mutex waiter lists have changed
    InheritDynamicPriority();
    if (newOwner != nullptr) {
        newOwner->InheritDynamicPriority();
    }

    return newOwner;
}
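
// Worked example (not part of this file): owner O holds mutex M with wait
// list [A, B, C] in priority order, where A and C wait on M and B waits on a
// different mutex. RelinquishMutex(&count, M) removes A and C from O's list,
// makes A the new owner, re-parents C onto A's wait list, leaves B on O, and
// returns A with count == 2, which is why unlock_slow_path sets the "more
// waiters" bit in the new tag.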

}