Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22 20:31:14 +00:00)

kern: refactor to use m_ for member variables

This commit is contained in:
parent b8471bcd4e
commit 92f1e2d100

135 changed files with 3727 additions and 3734 deletions
@@ -135,10 +135,10 @@ namespace ams::kern::arch::arm {
         private:
             static inline u32 s_mask[cpu::NumCores];
         private:
-            volatile GicDistributor *gicd;
-            volatile GicCpuInterface *gicc;
+            volatile GicDistributor *m_gicd;
+            volatile GicCpuInterface *m_gicc;
         public:
-            constexpr KInterruptController() : gicd(nullptr), gicc(nullptr) { /* ... */ }
+            constexpr KInterruptController() : m_gicd(nullptr), m_gicc(nullptr) { /* ... */ }

             void Initialize(s32 core_id);
             void Finalize(s32 core_id);
@@ -149,7 +149,7 @@ namespace ams::kern::arch::arm {
             void RestoreGlobal(const GlobalState *state) const;
         public:
             u32 GetIrq() const {
-                return this->gicc->iar;
+                return m_gicc->iar;
             }

             static constexpr s32 ConvertRawIrq(u32 irq) {
@@ -157,69 +157,69 @@ namespace ams::kern::arch::arm {
             }

             void Enable(s32 irq) const {
-                this->gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+                m_gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
             }

             void Disable(s32 irq) const {
-                this->gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+                m_gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
             }

             void Clear(s32 irq) const {
-                this->gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+                m_gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
             }

             void SetTarget(s32 irq, s32 core_id) const {
-                this->gicd->itargetsr.bytes[irq] = this->gicd->itargetsr.bytes[irq] | GetGicMask(core_id);
+                m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] | GetGicMask(core_id);
             }

             void ClearTarget(s32 irq, s32 core_id) const {
-                this->gicd->itargetsr.bytes[irq] = this->gicd->itargetsr.bytes[irq] & ~GetGicMask(core_id);
+                m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] & ~GetGicMask(core_id);
             }

             void SetPriorityLevel(s32 irq, s32 level) const {
                 MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
-                this->gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
+                m_gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
             }

             s32 GetPriorityLevel(s32 irq) const {
-                return FromGicPriorityValue(this->gicd->ipriorityr.bytes[irq]);
+                return FromGicPriorityValue(m_gicd->ipriorityr.bytes[irq]);
             }

             void SetPriorityLevel(s32 level) const {
                 MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
-                this->gicc->pmr = ToGicPriorityValue(level);
+                m_gicc->pmr = ToGicPriorityValue(level);
             }

             void SetEdge(s32 irq) const {
-                u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
+                u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
                 cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
                 cfg |= (0x2 << (2 * (irq % (BITSIZEOF(u32) / 2))));
-                this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
+                m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
             }

             void SetLevel(s32 irq) const {
-                u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
+                u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
                 cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
                 cfg |= (0x0 << (2 * (irq % (BITSIZEOF(u32) / 2))));
-                this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
+                m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
             }

             void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
                 MESOSPHERE_ASSERT(IsSoftware(irq));
-                this->gicd->sgir = GetCpuTargetListMask(irq, core_mask);
+                m_gicd->sgir = GetCpuTargetListMask(irq, core_mask);
             }

             void SendInterProcessorInterrupt(s32 irq) {
                 MESOSPHERE_ASSERT(IsSoftware(irq));
-                this->gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
+                m_gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
             }

             void EndOfInterrupt(u32 irq) const {
-                this->gicc->eoir = irq;
+                m_gicc->eoir = irq;
             }

             bool IsInterruptDefined(s32 irq) const {
-                const s32 num_interrupts = std::min(32 + 32 * (this->gicd->typer & 0x1F), static_cast<u32>(NumInterrupts));
+                const s32 num_interrupts = std::min(32 + 32 * (m_gicd->typer & 0x1F), static_cast<u32>(NumInterrupts));
                 return (0 <= irq && irq < num_interrupts);
             }
         public:
@@ -270,7 +270,7 @@ namespace ams::kern::arch::arm {
             }

             ALWAYS_INLINE void SetGicMask(s32 core_id) const {
-                s_mask[core_id] = this->gicd->itargetsr.bytes[0];
+                s_mask[core_id] = m_gicd->itargetsr.bytes[0];
             }

             NOINLINE void SetupInterruptLines(s32 core_id) const;
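Note on the indexing above: the isenabler/icenabler/icpendr banks follow the standard GICv2 layout, one bit per interrupt line and 32 lines per u32 register, so irq / BITSIZEOF(u32) selects the register and irq % BITSIZEOF(u32) selects the bit. A minimal standalone sketch of that math (plain C++; BitsPerReg stands in for the kernel's BITSIZEOF(u32)):

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr std::size_t BitsPerReg = 32;  // stands in for BITSIZEOF(u32)
        const int irq = 75;                     // example interrupt line

        // Which u32 register of the bank holds this line's bit...
        const std::size_t   reg_index = irq / BitsPerReg;          // 75 / 32 = 2
        // ...and which bit within that register.
        const std::uint32_t bit       = 1u << (irq % BitsPerReg);  // 1 << 11

        std::printf("isenabler[%zu] = 0x%08x\n", reg_index, (unsigned)bit);
    }

Because ISENABLERn/ICENABLERn are set/clear register pairs, storing a single 1 bit enables or disables exactly that line with no read-modify-write; that is why Enable and Disable are plain stores, while SetTarget (an ordinary byte array) does read-modify-write.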
@@ -44,16 +44,16 @@ namespace ams::kern::arch::arm64::init {

             struct NoClear{};
         private:
-            KPhysicalAddress l1_table;
+            KPhysicalAddress m_l1_table;
         public:
-            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }
+            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : m_l1_table(l1) { /* ... */ }

             constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
-                ClearNewPageTable(this->l1_table);
+                ClearNewPageTable(m_l1_table);
             }

             constexpr ALWAYS_INLINE uintptr_t GetL1TableAddress() const {
-                return GetInteger(this->l1_table);
+                return GetInteger(m_l1_table);
             }
         private:
             static constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KPhysicalAddress _l1_table, KVirtualAddress address) {
@@ -83,7 +83,7 @@ namespace ams::kern::arch::arm64::init {
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 size_t count = 0;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
                     if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -137,7 +137,7 @@ namespace ams::kern::arch::arm64::init {
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 size_t count = 0;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
                     if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -194,7 +194,7 @@ namespace ams::kern::arch::arm64::init {
             }

             PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
-                L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                 if (l1_entry->IsBlock()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
@@ -301,7 +301,7 @@ namespace ams::kern::arch::arm64::init {

                 /* Iteratively map pages until the requested region is mapped. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* Can we make an L1 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
@@ -382,7 +382,7 @@ namespace ams::kern::arch::arm64::init {

             KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
                 /* Get the L1 entry. */
-                const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                const L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                 if (l1_entry->IsBlock()) {
                     return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
@@ -444,7 +444,7 @@ namespace ams::kern::arch::arm64::init {
                 };

                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped, update. */
                     if (l1_entry->IsBlock()) {
@@ -485,7 +485,7 @@ namespace ams::kern::arch::arm64::init {

                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped, the address isn't free. */
                     if (l1_entry->IsBlock()) {
@@ -534,7 +534,7 @@ namespace ams::kern::arch::arm64::init {

                 /* Iteratively reprotect pages until the requested region is reprotected. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* Check if an L1 block is present. */
                     if (l1_entry->IsBlock()) {
@@ -680,43 +680,43 @@ namespace ams::kern::arch::arm64::init {
                 uintptr_t free_bitmap;
             };
         private:
-            State state;
+            State m_state;
         public:
-            constexpr ALWAYS_INLINE KInitialPageAllocator() : state{} { /* ... */ }
+            constexpr ALWAYS_INLINE KInitialPageAllocator() : m_state{} { /* ... */ }

             ALWAYS_INLINE void Initialize(uintptr_t address) {
-                this->state.next_address = address + BITSIZEOF(this->state.free_bitmap) * PageSize;
-                this->state.free_bitmap = ~uintptr_t();
+                m_state.next_address = address + BITSIZEOF(m_state.free_bitmap) * PageSize;
+                m_state.free_bitmap = ~uintptr_t();
             }

             ALWAYS_INLINE void InitializeFromState(uintptr_t state_val) {
                 if (kern::GetTargetFirmware() >= ams::TargetFirmware_10_0_0) {
-                    this->state = *reinterpret_cast<State *>(state_val);
+                    m_state = *reinterpret_cast<State *>(state_val);
                 } else {
-                    this->state.next_address = state_val;
-                    this->state.free_bitmap = 0;
+                    m_state.next_address = state_val;
+                    m_state.free_bitmap = 0;
                 }
             }

             ALWAYS_INLINE void GetFinalState(State *out) {
-                *out = this->state;
-                this->state = {};
+                *out = m_state;
+                m_state = {};
             }
         public:
             virtual KPhysicalAddress Allocate() override {
-                MESOSPHERE_INIT_ABORT_UNLESS(this->state.next_address != Null<uintptr_t>);
-                uintptr_t allocated = this->state.next_address;
-                if (this->state.free_bitmap != 0) {
+                MESOSPHERE_INIT_ABORT_UNLESS(m_state.next_address != Null<uintptr_t>);
+                uintptr_t allocated = m_state.next_address;
+                if (m_state.free_bitmap != 0) {
                     u64 index;
                     uintptr_t mask;
                     do {
-                        index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(this->state.free_bitmap) - 1);
+                        index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(m_state.free_bitmap) - 1);
                         mask = (static_cast<uintptr_t>(1) << index);
-                    } while ((this->state.free_bitmap & mask) == 0);
-                    this->state.free_bitmap &= ~mask;
-                    allocated = this->state.next_address - ((BITSIZEOF(this->state.free_bitmap) - index) * PageSize);
+                    } while ((m_state.free_bitmap & mask) == 0);
+                    m_state.free_bitmap &= ~mask;
+                    allocated = m_state.next_address - ((BITSIZEOF(m_state.free_bitmap) - index) * PageSize);
                 } else {
-                    this->state.next_address += PageSize;
+                    m_state.next_address += PageSize;
                 }

                 ClearPhysicalMemory(allocated, PageSize);
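Note: Allocate() above hands out pages in randomized order from a window of BITSIZEOF(free_bitmap) pages sitting just below state.next_address; bit i of free_bitmap marks the i-th page of that window as free, so claiming bit index means taking the page at next_address - (64 - index) * PageSize. A self-contained sketch of the same arithmetic (std::mt19937_64 stands in for KSystemControl::Init::GenerateRandomRange, purely for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <random>

    int main() {
        constexpr std::uintptr_t PageSize = 0x1000;
        constexpr std::size_t    Bits     = 64;  // BITSIZEOF(free_bitmap)

        // Initialize(): all 64 pages in [base, base + 64 * PageSize) are free.
        const std::uintptr_t base         = 0x80000000;
        const std::uintptr_t next_address = base + Bits * PageSize;
        std::uintptr_t       free_bitmap  = ~std::uintptr_t();

        std::mt19937_64 rng(0);  // stand-in for GenerateRandomRange(0, 63)

        // Allocate(): pick a random still-set bit, clear it, map it to a page.
        std::uint64_t index, mask;
        do {
            index = rng() % Bits;
            mask  = std::uintptr_t(1) << index;
        } while ((free_bitmap & mask) == 0);
        free_bitmap &= ~mask;

        // index 0 is the lowest page of the window, index 63 the page
        // immediately below next_address.
        const std::uintptr_t allocated = next_address - ((Bits - index) * PageSize);
        std::printf("allocated page at 0x%lx\n", (unsigned long)allocated);
    }

Once the bitmap runs dry, the real allocator falls back to bumping next_address one page at a time, as the else branch in the diff shows.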
@@ -135,36 +135,36 @@ namespace ams::kern::arch::arm64::cpu {
             NON_COPYABLE(GenericRegisterAccessorBase);
             NON_MOVEABLE(GenericRegisterAccessorBase);
         private:
-            u64 value;
+            u64 m_value;
         public:
-            constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
+            constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : m_value(v) { /* ... */ }
         protected:
             constexpr ALWAYS_INLINE u64 GetValue() const {
-                return this->value;
+                return m_value;
             }

             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
-                return (this->value >> offset) & ((1ul << count) - 1);
+                return (m_value >> offset) & ((1ul << count) - 1);
             }

             constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->value &= ~mask;
-                this->value |= (value & (mask >> offset)) << offset;
+                m_value &= ~mask;
+                m_value |= (value & (mask >> offset)) << offset;
             }

             constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->value &= ~mask;
-                this->value |= (value & mask);
+                m_value &= ~mask;
+                m_value |= (value & mask);
             }

             constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
                 const u64 mask = 1ul << offset;
                 if (enabled) {
-                    this->value |= mask;
+                    m_value |= mask;
                 } else {
-                    this->value &= ~mask;
+                    m_value &= ~mask;
                 }
             }
     };
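Note: GetBits/SetBits/SetBitsDirect are conventional mask-and-shift bitfield helpers over the cached register value; SetBits truncates the incoming value to the field width before inserting it. A free-standing illustration of the same math (field positions here are arbitrary, chosen only to show the masking):

    #include <cassert>
    #include <cstdint>

    constexpr std::uint64_t GetBits(std::uint64_t v, std::size_t offset, std::size_t count) {
        return (v >> offset) & ((1ul << count) - 1);
    }

    constexpr std::uint64_t SetBits(std::uint64_t v, std::size_t offset, std::size_t count, std::uint64_t value) {
        const std::uint64_t mask = ((1ul << count) - 1) << offset;
        v &= ~mask;                                 // clear the field
        v |= (value & (mask >> offset)) << offset;  // truncate to width, then insert
        return v;
    }

    int main() {
        std::uint64_t reg = 0;
        reg = SetBits(reg, 8, 4, 0xF);   // write 0b1111 into bits [11:8]
        assert(reg == 0xF00 && GetBits(reg, 8, 4) == 0xF);
        reg = SetBits(reg, 8, 4, 0x12);  // 0x12 truncated to 4 bits -> 0x2
        assert(GetBits(reg, 8, 4) == 0x2);
    }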
@@ -21,9 +21,9 @@ namespace ams::kern::arch::arm64 {

     class KHardwareTimer : public KInterruptTask, public KHardwareTimerBase {
         private:
-            s64 maximum_time;
+            s64 m_maximum_time;
         public:
-            constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
+            constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), m_maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
         public:
             /* Public API. */
             NOINLINE void Initialize();
@@ -38,7 +38,7 @@ namespace ams::kern::arch::arm64 {
                 KScopedSpinLock lk(this->GetLock());

                 if (this->RegisterAbsoluteTaskImpl(task, task_time)) {
-                    if (task_time <= this->maximum_time) {
+                    if (task_time <= m_maximum_time) {
                         SetCompareValue(task_time);
                         EnableInterrupt();
                     }
@@ -47,18 +47,18 @@ namespace ams::kern::arch::arm64 {
                 constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
             };
         private:
-            KCoreLocalInterruptEntry core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
-            KInterruptController interrupt_controller{};
-            KInterruptController::LocalState local_states[cpu::NumCores]{};
-            bool local_state_saved[cpu::NumCores]{};
-            mutable KSpinLock global_interrupt_lock{};
-            KGlobalInterruptEntry global_interrupts[KInterruptController::NumGlobalInterrupts]{};
-            KInterruptController::GlobalState global_state{};
-            bool global_state_saved{};
+            KCoreLocalInterruptEntry m_core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
+            KInterruptController m_interrupt_controller{};
+            KInterruptController::LocalState m_local_states[cpu::NumCores]{};
+            bool m_local_state_saved[cpu::NumCores]{};
+            mutable KSpinLock m_global_interrupt_lock{};
+            KGlobalInterruptEntry m_global_interrupts[KInterruptController::NumGlobalInterrupts]{};
+            KInterruptController::GlobalState m_global_state{};
+            bool m_global_state_saved{};
         private:
-            ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return this->global_interrupt_lock; }
-            ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return this->global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
-            ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return this->core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }
+            ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return m_global_interrupt_lock; }
+            ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return m_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
+            ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return m_core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }

             bool OnHandleInterrupt();
         public:
@@ -71,15 +71,15 @@ namespace ams::kern::arch::arm64 {
             NOINLINE void Restore(s32 core_id);

             bool IsInterruptDefined(s32 irq) const {
-                return this->interrupt_controller.IsInterruptDefined(irq);
+                return m_interrupt_controller.IsInterruptDefined(irq);
             }

             bool IsGlobal(s32 irq) const {
-                return this->interrupt_controller.IsGlobal(irq);
+                return m_interrupt_controller.IsGlobal(irq);
             }

             bool IsLocal(s32 irq) const {
-                return this->interrupt_controller.IsLocal(irq);
+                return m_interrupt_controller.IsLocal(irq);
             }

             NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
@@ -89,11 +89,11 @@ namespace ams::kern::arch::arm64 {
             NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);

             ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
-                this->interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
+                m_interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
             }

             ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
-                this->interrupt_controller.SendInterProcessorInterrupt(irq);
+                m_interrupt_controller.SendInterProcessorInterrupt(irq);
             }

             static void HandleInterrupt(bool user_mode);
@@ -92,15 +92,15 @@ namespace ams::kern::arch::arm64 {
                 return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
             }
         private:
-            KPageTableManager *manager;
-            u64 ttbr;
-            u8 asid;
+            KPageTableManager *m_manager;
+            u64 m_ttbr;
+            u8 m_asid;
         protected:
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
             virtual void FinalizeUpdate(PageLinkedList *page_list) override;

-            KPageTableManager &GetPageTableManager() const { return *this->manager; }
+            KPageTableManager &GetPageTableManager() const { return *m_manager; }
         private:
             constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
                 /* Set basic attributes. */
@@ -166,13 +166,13 @@ namespace ams::kern::arch::arm64 {
                 return entry;
             }
         public:
-            constexpr KPageTable() : KPageTableBase(), manager(), ttbr(), asid() { /* ... */ }
+            constexpr KPageTable() : KPageTableBase(), m_manager(), m_ttbr(), m_asid() { /* ... */ }

             static NOINLINE void Initialize(s32 core_id);

             ALWAYS_INLINE void Activate(u32 proc_id) {
                 cpu::DataSynchronizationBarrier();
-                cpu::SwitchProcess(this->ttbr, proc_id);
+                cpu::SwitchProcess(m_ttbr, proc_id);
             }

             NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
@@ -225,7 +225,7 @@ namespace ams::kern::arch::arm64 {
             }

             void OnTableUpdated() const {
-                cpu::InvalidateTlbByAsid(this->asid);
+                cpu::InvalidateTlbByAsid(m_asid);
             }

             void OnKernelTableUpdated() const {
@@ -105,50 +105,50 @@ namespace ams::kern::arch::arm64 {
                 ContigType_Contiguous = (0x1ul << 52),
             };
         protected:
-            u64 attributes;
+            u64 m_attributes;
         public:
             /* Take in a raw attribute. */
-            constexpr explicit ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
-            constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry() : m_attributes() { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : m_attributes(attr) { /* ... */ }

-            constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : m_attributes(0) { /* ... */ }

             /* Extend a previous attribute. */
-            constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : m_attributes(rhs.m_attributes | new_attr) { /* ... */ }

             /* Construct a new attribute. */
             constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
-                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
+                : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
             {
                 /* ... */
             }
         protected:
             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
-                return (this->attributes >> offset) & ((1ul << count) - 1);
+                return (m_attributes >> offset) & ((1ul << count) - 1);
             }

             constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
-                return this->attributes & (((1ul << count) - 1) << offset);
+                return m_attributes & (((1ul << count) - 1) << offset);
             }

             constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->attributes &= ~mask;
-                this->attributes |= (value & (mask >> offset)) << offset;
+                m_attributes &= ~mask;
+                m_attributes |= (value & (mask >> offset)) << offset;
             }

             constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->attributes &= ~mask;
-                this->attributes |= (value & mask);
+                m_attributes &= ~mask;
+                m_attributes |= (value & mask);
             }

             constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
                 const u64 mask = 1ul << offset;
                 if (enabled) {
-                    this->attributes |= mask;
+                    m_attributes |= mask;
                 } else {
-                    this->attributes &= ~mask;
+                    m_attributes &= ~mask;
                 }
             }
         public:
@@ -167,9 +167,9 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
-            constexpr ALWAYS_INLINE bool IsBlock() const { return (this->attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
-            constexpr ALWAYS_INLINE bool IsTable() const { return (this->attributes & ExtensionFlag_TestTableMask) == 2; }
-            constexpr ALWAYS_INLINE bool IsEmpty() const { return (this->attributes & ExtensionFlag_TestTableMask) == 0; }
+            constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
+            constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; }
+            constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; }
             constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }

             constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
@@ -185,21 +185,21 @@ namespace ams::kern::arch::arm64 {

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
                 constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
-                return this->attributes & BaseMask;
+                return m_attributes & BaseMask;
             }

             constexpr ALWAYS_INLINE bool IsForMerge(u64 attr) const {
                 constexpr u64 BaseMaskForMerge = ~static_cast<u64>(ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail);
-                return (this->attributes & BaseMaskForMerge) == attr;
+                return (m_attributes & BaseMaskForMerge) == attr;
             }

             constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
-                return this->attributes;
+                return m_attributes;
             }

         protected:
             constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
-                return this->attributes;
+                return m_attributes;
             }
     };

@@ -262,7 +262,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL2BlockMask(idx);
             }

             constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
@@ -322,7 +322,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL2BlockMask(idx);
             }

             static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
@@ -339,7 +339,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL3BlockMask(idx);
             }

             constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
@@ -376,7 +376,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL3BlockMask(idx);
             }

             constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
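Note: IsBlock/IsTable/IsEmpty classify a descriptor through ExtensionFlag_TestTableMask, which folds Mesosphere's software extension bits in with the architectural type bits (hence the comparison against the literal 2 for tables). For intuition only, here is the purely architectural AArch64 version of the same test (bits [1:0]: 0b01 = block, 0b11 = table, bit 0 clear = invalid); the mask below is the architectural one, not Mesosphere's extended mask:

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    enum : std::uint64_t {
        DescValid    = 1ul << 0,
        DescTable    = 1ul << 1,
        DescTypeMask = DescValid | DescTable,
    };

    constexpr bool IsBlock(std::uint64_t attr) { return (attr & DescTypeMask) == DescValid; }
    constexpr bool IsTable(std::uint64_t attr) { return (attr & DescTypeMask) == (DescValid | DescTable); }
    constexpr bool IsEmpty(std::uint64_t attr) { return (attr & DescValid) == 0; }

    int main() {
        for (std::uint64_t attr : {0x0ul, 0x1ul, 0x3ul}) {
            std::printf("attr=0x%lx block=%d table=%d empty=%d\n",
                        (unsigned long)attr, IsBlock(attr), IsTable(attr), IsEmpty(attr));
        }
    }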
@@ -77,16 +77,16 @@ namespace ams::kern::arch::arm64 {
             ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
             ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
         private:
-            L1PageTableEntry *table;
-            bool is_kernel;
-            u32 num_entries;
+            L1PageTableEntry *m_table;
+            bool m_is_kernel;
+            u32 m_num_entries;
         public:
             ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) const {
                 return table + index * sizeof(PageTableEntry);
             }

             ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) const {
-                return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(this->table), GetL1Index(address) & (this->num_entries - 1)));
+                return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(m_table), GetL1Index(address) & (m_num_entries - 1)));
             }

             ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
@@ -105,7 +105,7 @@ namespace ams::kern::arch::arm64 {
                 return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
             }
         public:
-            constexpr KPageTableImpl() : table(), is_kernel(), num_entries() { /* ... */ }
+            constexpr KPageTableImpl() : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }

             NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
             NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
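Note: GetL1Entry above wraps the L1 index with & (m_num_entries - 1), a power-of-two modulo that lets kernel and process tables of different sizes share one lookup path. A small sketch (the bits-[38:30] index extraction is the usual AArch64 4KB-granule layout, assumed here purely for illustration):

    #include <cstdint>
    #include <cstdio>

    // With a 4KB granule and a 39-bit VA space, the L1 index is VA bits [38:30].
    constexpr std::uint64_t GetL1Index(std::uint64_t addr) {
        return (addr >> 30) & 0x1FF;
    }

    int main() {
        const std::uint32_t num_entries = 64;            // table size, power of two
        const std::uint64_t addr        = 0xF800000000;  // example virtual address

        // AND with (num_entries - 1) wraps the raw index into the table;
        // equivalent to GetL1Index(addr) % num_entries, but division-free.
        const std::uint64_t idx = GetL1Index(addr) & (num_entries - 1);
        std::printf("l1 index = %llu\n", (unsigned long long)idx);
    }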
@@ -21,274 +21,274 @@ namespace ams::kern::arch::arm64 {

     class KProcessPageTable {
         private:
-            KPageTable page_table;
+            KPageTable m_page_table;
         public:
-            constexpr KProcessPageTable() : page_table() { /* ... */ }
+            constexpr KProcessPageTable() : m_page_table() { /* ... */ }

             void Activate(u64 id) {
                 /* Activate the page table with the specified contextidr. */
-                this->page_table.Activate(id);
+                m_page_table.Activate(id);
             }

             Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
-                return this->page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
+                return m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
             }

-            void Finalize() { this->page_table.Finalize(); }
+            void Finalize() { m_page_table.Finalize(); }

             Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
-                return this->page_table.SetMemoryPermission(addr, size, perm);
+                return m_page_table.SetMemoryPermission(addr, size, perm);
             }

             Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
-                return this->page_table.SetProcessMemoryPermission(addr, size, perm);
+                return m_page_table.SetProcessMemoryPermission(addr, size, perm);
             }

             Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
-                return this->page_table.SetMemoryAttribute(addr, size, mask, attr);
+                return m_page_table.SetMemoryAttribute(addr, size, mask, attr);
             }

             Result SetHeapSize(KProcessAddress *out, size_t size) {
-                return this->page_table.SetHeapSize(out, size);
+                return m_page_table.SetHeapSize(out, size);
             }

             Result SetMaxHeapSize(size_t size) {
-                return this->page_table.SetMaxHeapSize(size);
+                return m_page_table.SetMaxHeapSize(size);
             }

             Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
-                return this->page_table.QueryInfo(out_info, out_page_info, addr);
+                return m_page_table.QueryInfo(out_info, out_page_info, addr);
             }

             Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
-                return this->page_table.QueryPhysicalAddress(out, address);
+                return m_page_table.QueryPhysicalAddress(out, address);
             }

             Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
-                return this->page_table.QueryStaticMapping(out, address, size);
+                return m_page_table.QueryStaticMapping(out, address, size);
             }

             Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
-                return this->page_table.QueryIoMapping(out, address, size);
+                return m_page_table.QueryIoMapping(out, address, size);
             }

             Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.MapMemory(dst_address, src_address, size);
+                return m_page_table.MapMemory(dst_address, src_address, size);
             }

             Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.UnmapMemory(dst_address, src_address, size);
+                return m_page_table.UnmapMemory(dst_address, src_address, size);
             }

             Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.MapCodeMemory(dst_address, src_address, size);
+                return m_page_table.MapCodeMemory(dst_address, src_address, size);
             }

             Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.UnmapCodeMemory(dst_address, src_address, size);
+                return m_page_table.UnmapCodeMemory(dst_address, src_address, size);
             }

             Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
-                return this->page_table.MapIo(phys_addr, size, perm);
+                return m_page_table.MapIo(phys_addr, size, perm);
             }

             Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
-                return this->page_table.MapStatic(phys_addr, size, perm);
+                return m_page_table.MapStatic(phys_addr, size, perm);
             }

             Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
-                return this->page_table.MapRegion(region_type, perm);
+                return m_page_table.MapRegion(region_type, perm);
             }

             Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPageGroup(addr, pg, state, perm);
+                return m_page_table.MapPageGroup(addr, pg, state, perm);
             }

             Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
-                return this->page_table.UnmapPageGroup(address, pg, state);
+                return m_page_table.UnmapPageGroup(address, pg, state);
             }

             Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
+                return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
             }

             Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPages(out_addr, num_pages, state, perm);
+                return m_page_table.MapPages(out_addr, num_pages, state, perm);
             }

             Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPages(address, num_pages, state, perm);
+                return m_page_table.MapPages(address, num_pages, state, perm);
             }

             Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
-                return this->page_table.UnmapPages(addr, num_pages, state);
+                return m_page_table.UnmapPages(addr, num_pages, state);
             }

             Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
-                return this->page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
+                return m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
             }

             Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
-                return this->page_table.InvalidateProcessDataCache(address, size);
+                return m_page_table.InvalidateProcessDataCache(address, size);
             }

             Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
-                return this->page_table.ReadDebugMemory(buffer, address, size);
+                return m_page_table.ReadDebugMemory(buffer, address, size);
             }

             Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
-                return this->page_table.WriteDebugMemory(address, buffer, size);
+                return m_page_table.WriteDebugMemory(address, buffer, size);
             }

             Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
-                return this->page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
+                return m_page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
             }

             Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
-                return this->page_table.UnlockForDeviceAddressSpace(address, size);
+                return m_page_table.UnlockForDeviceAddressSpace(address, size);
             }

             Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
-                return this->page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
+                return m_page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
             }

             Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
-                return this->page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
+                return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
             }

             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
-                return this->page_table.LockForIpcUserBuffer(out, address, size);
+                return m_page_table.LockForIpcUserBuffer(out, address, size);
             }

             Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
-                return this->page_table.UnlockForIpcUserBuffer(address, size);
+                return m_page_table.UnlockForIpcUserBuffer(address, size);
             }

             Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
-                return this->page_table.LockForTransferMemory(out, address, size, perm);
+                return m_page_table.LockForTransferMemory(out, address, size, perm);
             }

             Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
-                return this->page_table.UnlockForTransferMemory(address, size, pg);
+                return m_page_table.UnlockForTransferMemory(address, size, pg);
             }

             Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
-                return this->page_table.LockForCodeMemory(out, address, size);
+                return m_page_table.LockForCodeMemory(out, address, size);
             }

             Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
-                return this->page_table.UnlockForCodeMemory(address, size, pg);
+                return m_page_table.UnlockForCodeMemory(address, size, pg);
             }

             Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-                return this->page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+                return m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }

             Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-                return this->page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+                return m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }

             Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
-                return this->page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
+                return m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
             }

             Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
-                return this->page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
+                return m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
             }

             Result CopyMemoryFromHeapToHeap(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-                return this->page_table.CopyMemoryFromHeapToHeap(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+                return m_page_table.CopyMemoryFromHeapToHeap(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }

             Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||||
return this->page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
return m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||||
}
|
}
|
||||||
|
|
||||||
Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
|
Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
|
||||||
return this->page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.page_table, test_perm, dst_state, send);
|
return m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, test_perm, dst_state, send);
|
||||||
}
|
}
|
||||||
|
|
||||||
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process) {
|
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process) {
|
||||||
return this->page_table.CleanupForIpcServer(address, size, dst_state, server_process);
|
return m_page_table.CleanupForIpcServer(address, size, dst_state, server_process);
|
||||||
}
|
}
|
||||||
|
|
||||||
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
||||||
return this->page_table.CleanupForIpcClient(address, size, dst_state);
|
return m_page_table.CleanupForIpcClient(address, size, dst_state);
|
||||||
}
|
}
|
||||||
|
|
||||||
Result MapPhysicalMemory(KProcessAddress address, size_t size) {
|
Result MapPhysicalMemory(KProcessAddress address, size_t size) {
|
||||||
return this->page_table.MapPhysicalMemory(address, size);
|
return m_page_table.MapPhysicalMemory(address, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
|
Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
|
||||||
return this->page_table.UnmapPhysicalMemory(address, size);
|
return m_page_table.UnmapPhysicalMemory(address, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
|
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
|
||||||
return this->page_table.MapPhysicalMemoryUnsafe(address, size);
|
return m_page_table.MapPhysicalMemoryUnsafe(address, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
|
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
|
||||||
return this->page_table.UnmapPhysicalMemoryUnsafe(address, size);
|
return m_page_table.UnmapPhysicalMemoryUnsafe(address, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DumpMemoryBlocks() const {
|
void DumpMemoryBlocks() const {
|
||||||
return this->page_table.DumpMemoryBlocks();
|
return m_page_table.DumpMemoryBlocks();
|
||||||
}
|
}
|
||||||
|
|
||||||
void DumpPageTable() const {
|
void DumpPageTable() const {
|
||||||
return this->page_table.DumpPageTable();
|
return m_page_table.DumpPageTable();
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t CountPageTables() const {
|
size_t CountPageTables() const {
|
||||||
return this->page_table.CountPageTables();
|
return m_page_table.CountPageTables();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
|
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
|
||||||
return this->page_table.GetPhysicalAddress(out, address);
|
return m_page_table.GetPhysicalAddress(out, address);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); }
|
bool Contains(KProcessAddress addr, size_t size) const { return m_page_table.Contains(addr, size); }
|
||||||
|
|
||||||
bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInAliasRegion(addr, size); }
|
bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInAliasRegion(addr, size); }
|
||||||
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInUnsafeAliasRegion(addr, size); }
|
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInUnsafeAliasRegion(addr, size); }
|
||||||
|
|
||||||
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); }
|
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return m_page_table.CanContain(addr, size, state); }
|
||||||
|
|
||||||
KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); }
|
KProcessAddress GetAddressSpaceStart() const { return m_page_table.GetAddressSpaceStart(); }
|
||||||
KProcessAddress GetHeapRegionStart() const { return this->page_table.GetHeapRegionStart(); }
|
KProcessAddress GetHeapRegionStart() const { return m_page_table.GetHeapRegionStart(); }
|
||||||
KProcessAddress GetAliasRegionStart() const { return this->page_table.GetAliasRegionStart(); }
|
KProcessAddress GetAliasRegionStart() const { return m_page_table.GetAliasRegionStart(); }
|
||||||
KProcessAddress GetStackRegionStart() const { return this->page_table.GetStackRegionStart(); }
|
KProcessAddress GetStackRegionStart() const { return m_page_table.GetStackRegionStart(); }
|
||||||
KProcessAddress GetKernelMapRegionStart() const { return this->page_table.GetKernelMapRegionStart(); }
|
KProcessAddress GetKernelMapRegionStart() const { return m_page_table.GetKernelMapRegionStart(); }
|
||||||
KProcessAddress GetAliasCodeRegionStart() const { return this->page_table.GetAliasCodeRegionStart(); }
|
KProcessAddress GetAliasCodeRegionStart() const { return m_page_table.GetAliasCodeRegionStart(); }
|
||||||
|
|
||||||
size_t GetAddressSpaceSize() const { return this->page_table.GetAddressSpaceSize(); }
|
size_t GetAddressSpaceSize() const { return m_page_table.GetAddressSpaceSize(); }
|
||||||
size_t GetHeapRegionSize() const { return this->page_table.GetHeapRegionSize(); }
|
size_t GetHeapRegionSize() const { return m_page_table.GetHeapRegionSize(); }
|
||||||
size_t GetAliasRegionSize() const { return this->page_table.GetAliasRegionSize(); }
|
size_t GetAliasRegionSize() const { return m_page_table.GetAliasRegionSize(); }
|
||||||
size_t GetStackRegionSize() const { return this->page_table.GetStackRegionSize(); }
|
size_t GetStackRegionSize() const { return m_page_table.GetStackRegionSize(); }
|
||||||
size_t GetKernelMapRegionSize() const { return this->page_table.GetKernelMapRegionSize(); }
|
size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
|
||||||
size_t GetAliasCodeRegionSize() const { return this->page_table.GetAliasCodeRegionSize(); }
|
size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }
|
||||||
|
|
||||||
size_t GetNormalMemorySize() const { return this->page_table.GetNormalMemorySize(); }
|
size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }
|
||||||
|
|
||||||
size_t GetCodeSize() const { return this->page_table.GetCodeSize(); }
|
size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }
|
||||||
size_t GetCodeDataSize() const { return this->page_table.GetCodeDataSize(); }
|
size_t GetCodeDataSize() const { return m_page_table.GetCodeDataSize(); }
|
||||||
|
|
||||||
size_t GetAliasCodeSize() const { return this->page_table.GetAliasCodeSize(); }
|
size_t GetAliasCodeSize() const { return m_page_table.GetAliasCodeSize(); }
|
||||||
size_t GetAliasCodeDataSize() const { return this->page_table.GetAliasCodeDataSize(); }
|
size_t GetAliasCodeDataSize() const { return m_page_table.GetAliasCodeDataSize(); }
|
||||||
|
|
||||||
u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); }
|
u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }
|
||||||
|
|
||||||
KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) const {
|
KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) const {
|
||||||
return this->page_table.GetHeapPhysicalAddress(address);
|
return m_page_table.GetHeapPhysicalAddress(address);
|
||||||
}
|
}
|
||||||
|
|
||||||
KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) const {
|
KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) const {
|
||||||
return this->page_table.GetHeapVirtualAddress(address);
|
return m_page_table.GetHeapVirtualAddress(address);
|
||||||
}
|
}
|
||||||
|
|
||||||
KBlockInfoManager *GetBlockInfoManager() {
|
KBlockInfoManager *GetBlockInfoManager() {
|
||||||
return this->page_table.GetBlockInfoManager();
|
return m_page_table.GetBlockInfoManager();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
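Note: everything above is pure delegation — KProcessPageTable wraps a KPageTable member and forwards each call, so the commit only renames that member and touches these one-line bodies. A minimal sketch of the same facade pattern in isolation (illustrative code, not from this commit; PageTableImpl and PageTableFacade are made-up names):

    #include <cstddef>
    #include <cstdint>

    /* Illustrative stand-ins for KPageTable / KProcessPageTable. */
    class PageTableImpl {
        public:
            bool Contains(uintptr_t addr, size_t size) const {
                /* Toy bounds check standing in for the real page-table logic. */
                return addr >= 0x1000 && size > 0;
            }
    };

    class PageTableFacade {
        private:
            PageTableImpl m_impl; /* m_ prefix: the convention this commit adopts */
        public:
            bool Contains(uintptr_t addr, size_t size) const {
                return m_impl.Contains(addr, size); /* pure one-line forwarding */
            }
    };

Because the facade adds no state of its own, a rename like this is mechanical and behavior-preserving.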
@@ -21,19 +21,19 @@ namespace ams::kern::arch::arm64 {

 class KNotAlignedSpinLock {
 private:
-    u32 packed_tickets;
+    u32 m_packed_tickets;
 public:
-    constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ }
+    constexpr KNotAlignedSpinLock() : m_packed_tickets(0) { /* ... */ }

     ALWAYS_INLINE void Lock() {
         u32 tmp0, tmp1, tmp2;

         __asm__ __volatile__(
-            " prfm pstl1keep, %[packed_tickets]\n"
+            " prfm pstl1keep, %[m_packed_tickets]\n"
             "1:\n"
-            " ldaxr %w[tmp0], %[packed_tickets]\n"
+            " ldaxr %w[tmp0], %[m_packed_tickets]\n"
             " add %w[tmp2], %w[tmp0], #0x10000\n"
-            " stxr %w[tmp1], %w[tmp2], %[packed_tickets]\n"
+            " stxr %w[tmp1], %w[tmp2], %[m_packed_tickets]\n"
             " cbnz %w[tmp1], 1b\n"
             " \n"
             " and %w[tmp1], %w[tmp0], #0xFFFF\n"

@@ -42,21 +42,21 @@ namespace ams::kern::arch::arm64 {
             " sevl\n"
             "2:\n"
             " wfe\n"
-            " ldaxrh %w[tmp1], %[packed_tickets]\n"
+            " ldaxrh %w[tmp1], %[m_packed_tickets]\n"
             " cmp %w[tmp1], %w[tmp0], lsr #16\n"
             " b.ne 2b\n"
             "3:\n"
-            : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [packed_tickets]"+Q"(this->packed_tickets)
+            : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [m_packed_tickets]"+Q"(m_packed_tickets)
             :
             : "cc", "memory"
         );
     }

     ALWAYS_INLINE void Unlock() {
-        const u32 value = this->packed_tickets + 1;
+        const u32 value = m_packed_tickets + 1;
         __asm__ __volatile__(
-            " stlrh %w[value], %[packed_tickets]\n"
+            " stlrh %w[value], %[m_packed_tickets]\n"
-            : [packed_tickets]"+Q"(this->packed_tickets)
+            : [m_packed_tickets]"+Q"(m_packed_tickets)
             : [value]"r"(value)
             : "memory"
         );

@@ -66,39 +66,39 @@ namespace ams::kern::arch::arm64 {

 class KAlignedSpinLock {
 private:
-    alignas(cpu::DataCacheLineSize) u16 current_ticket;
+    alignas(cpu::DataCacheLineSize) u16 m_current_ticket;
-    alignas(cpu::DataCacheLineSize) u16 next_ticket;
+    alignas(cpu::DataCacheLineSize) u16 m_next_ticket;
 public:
-    constexpr KAlignedSpinLock() : current_ticket(0), next_ticket(0) { /* ... */ }
+    constexpr KAlignedSpinLock() : m_current_ticket(0), m_next_ticket(0) { /* ... */ }

     ALWAYS_INLINE void Lock() {
         u32 tmp0, tmp1, got_lock;

         __asm__ __volatile__(
-            " prfm pstl1keep, %[next_ticket]\n"
+            " prfm pstl1keep, %[m_next_ticket]\n"
             "1:\n"
-            " ldaxrh %w[tmp0], %[next_ticket]\n"
+            " ldaxrh %w[tmp0], %[m_next_ticket]\n"
             " add %w[tmp1], %w[tmp0], #0x1\n"
-            " stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n"
+            " stxrh %w[got_lock], %w[tmp1], %[m_next_ticket]\n"
             " cbnz %w[got_lock], 1b\n"
             " \n"
             " sevl\n"
             "2:\n"
             " wfe\n"
-            " ldaxrh %w[tmp1], %[current_ticket]\n"
+            " ldaxrh %w[tmp1], %[m_current_ticket]\n"
             " cmp %w[tmp1], %w[tmp0]\n"
             " b.ne 2b\n"
-            : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket)
+            : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [m_next_ticket]"+Q"(m_next_ticket)
-            : [current_ticket]"Q"(this->current_ticket)
+            : [m_current_ticket]"Q"(m_current_ticket)
             : "cc", "memory"
         );
     }

     ALWAYS_INLINE void Unlock() {
-        const u32 value = this->current_ticket + 1;
+        const u32 value = m_current_ticket + 1;
         __asm__ __volatile__(
-            " stlrh %w[value], %[current_ticket]\n"
+            " stlrh %w[value], %[m_current_ticket]\n"
-            : [current_ticket]"+Q"(this->current_ticket)
+            : [m_current_ticket]"+Q"(m_current_ticket)
             : [value]"r"(value)
             : "memory"
         );
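Note: both spinlocks above are ticket locks — Lock() atomically takes the next ticket number (the ldaxrh/stxrh retry loop) and spins in wfe until the serving counter reaches it, while Unlock() publishes the next ticket with a release store (stlrh). KNotAlignedSpinLock packs both counters into one u32, with the waiter ticket in the high halfword. A portable C++ sketch of the same semantics (illustrative only, not code from this commit):

    #include <atomic>
    #include <cstdint>

    /* Illustrative ticket lock: acquire takes a number, release serves the next. */
    class TicketLock {
        private:
            std::atomic<uint16_t> m_next_ticket{0};    /* bumped by each acquirer */
            std::atomic<uint16_t> m_current_ticket{0}; /* bumped on release */
        public:
            void Lock() {
                /* Take a ticket; fetch_add returns our number. */
                const uint16_t my_ticket = m_next_ticket.fetch_add(1, std::memory_order_relaxed);
                /* Spin until our number comes up (the wfe/cmp/b.ne loop above). */
                while (m_current_ticket.load(std::memory_order_acquire) != my_ticket) {
                    /* spin */
                }
            }
            void Unlock() {
                /* Only the holder writes m_current_ticket, so a relaxed read is fine. */
                const uint16_t next = m_current_ticket.load(std::memory_order_relaxed) + 1;
                m_current_ticket.store(next, std::memory_order_release);
            }
    };

The ticket scheme grants the lock in FIFO order, which is why the kernel prefers it over a plain test-and-set spinlock under contention.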
@@ -22,16 +22,16 @@ namespace ams::kern::arch::arm64 {

 class KSupervisorPageTable {
 private:
-    KPageTable page_table;
+    KPageTable m_page_table;
-    u64 ttbr0_identity[cpu::NumCores];
+    u64 m_ttbr0_identity[cpu::NumCores];
 public:
-    constexpr KSupervisorPageTable() : page_table(), ttbr0_identity() { /* ... */ }
+    constexpr KSupervisorPageTable() : m_page_table(), m_ttbr0_identity() { /* ... */ }

     NOINLINE void Initialize(s32 core_id);

     void Activate() {
         /* Activate, using process id = 0xFFFFFFFF */
-        this->page_table.Activate(0xFFFFFFFF);
+        m_page_table.Activate(0xFFFFFFFF);
     }

     void ActivateForInit() {

@@ -42,37 +42,37 @@ namespace ams::kern::arch::arm64 {
     }

     Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
-        return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
+        return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
     }

     Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
-        return this->page_table.UnmapPages(address, num_pages, state);
+        return m_page_table.UnmapPages(address, num_pages, state);
     }

     Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
-        return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
+        return m_page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
     }

     Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
-        return this->page_table.UnmapPageGroup(address, pg, state);
+        return m_page_table.UnmapPageGroup(address, pg, state);
     }

     bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
-        return this->page_table.GetPhysicalAddress(out, address);
+        return m_page_table.GetPhysicalAddress(out, address);
     }

-    constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return this->ttbr0_identity[core_id]; }
+    constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return m_ttbr0_identity[core_id]; }

     void DumpMemoryBlocks() const {
-        return this->page_table.DumpMemoryBlocks();
+        return m_page_table.DumpMemoryBlocks();
     }

     void DumpPageTable() const {
-        return this->page_table.DumpPageTable();
+        return m_page_table.DumpPageTable();
     }

     size_t CountPageTables() const {
-        return this->page_table.CountPageTables();
+        return m_page_table.CountPageTables();
     }
 };
@@ -45,19 +45,19 @@ namespace ams::kern::arch::arm64 {
         u64 x28;
         u64 x29;
     };
-    } callee_saved;
+    } m_callee_saved;
-    u64 lr;
+    u64 m_lr;
-    u64 sp;
+    u64 m_sp;
-    u64 cpacr;
+    u64 m_cpacr;
-    u64 fpcr;
+    u64 m_fpcr;
-    u64 fpsr;
+    u64 m_fpsr;
-    alignas(0x10) u128 fpu_registers[NumFpuRegisters];
+    alignas(0x10) u128 m_fpu_registers[NumFpuRegisters];
-    bool locked;
+    bool m_locked;
 private:
     static void RestoreFpuRegisters64(const KThreadContext &);
     static void RestoreFpuRegisters32(const KThreadContext &);
 public:
-    constexpr explicit KThreadContext() : callee_saved(), lr(), sp(), cpacr(), fpcr(), fpsr(), fpu_registers(), locked() { /* ... */ }
+    constexpr explicit KThreadContext() : m_callee_saved(), m_lr(), m_sp(), m_cpacr(), m_fpcr(), m_fpsr(), m_fpu_registers(), m_locked() { /* ... */ }

     Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main);
     Result Finalize();

@@ -66,17 +66,17 @@ namespace ams::kern::arch::arm64 {

     static void FpuContextSwitchHandler(KThread *thread);

-    u32 GetFpcr() const { return this->fpcr; }
+    u32 GetFpcr() const { return m_fpcr; }
-    u32 GetFpsr() const { return this->fpsr; }
+    u32 GetFpsr() const { return m_fpsr; }

-    void SetFpcr(u32 v) { this->fpcr = v; }
+    void SetFpcr(u32 v) { m_fpcr = v; }
-    void SetFpsr(u32 v) { this->fpsr = v; }
+    void SetFpsr(u32 v) { m_fpsr = v; }

     void CloneFpuStatus();

     void SetFpuRegisters(const u128 *v, bool is_64_bit);

-    const u128 *GetFpuRegisters() const { return this->fpu_registers; }
+    const u128 *GetFpuRegisters() const { return m_fpu_registers; }
 public:
     static void OnThreadTerminating(const KThread *thread);
 };
@@ -27,13 +27,13 @@ namespace ams::kern::board::nintendo::nx {
 private:
     static constexpr size_t TableCount = 4;
 private:
-    KVirtualAddress tables[TableCount];
+    KVirtualAddress m_tables[TableCount];
-    u8 table_asids[TableCount];
+    u8 m_table_asids[TableCount];
-    u64 attached_device;
+    u64 m_attached_device;
-    u32 attached_value;
+    u32 m_attached_value;
-    u32 detached_value;
+    u32 m_detached_value;
-    u32 hs_attached_value;
+    u32 m_hs_attached_value;
-    u32 hs_detached_value;
+    u32 m_hs_detached_value;
 private:
     static ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress addr) {
         const KMemoryRegion *hint = nullptr;

@@ -61,7 +61,7 @@ namespace ams::kern::board::nintendo::nx {
         return KPageTable::GetPageTablePhysicalAddress(addr);
     }
 public:
-    constexpr KDevicePageTable() : tables(), table_asids(), attached_device(), attached_value(), detached_value(), hs_attached_value(), hs_detached_value() { /* ... */ }
+    constexpr KDevicePageTable() : m_tables(), m_table_asids(), m_attached_device(), m_attached_value(), m_detached_value(), m_hs_attached_value(), m_hs_detached_value() { /* ... */ }

     Result Initialize(u64 space_address, u64 space_size);
     void Finalize();
@@ -23,9 +23,9 @@ namespace ams::kern {
 public:
     using ThreadTree = KConditionVariable::ThreadTree;
 private:
-    ThreadTree tree;
+    ThreadTree m_tree;
 public:
-    constexpr KAddressArbiter() : tree() { /* ... */ }
+    constexpr KAddressArbiter() : m_tree() { /* ... */ }

     Result SignalToAddress(uintptr_t addr, ams::svc::SignalType type, s32 value, s32 count) {
         switch (type) {
@@ -32,20 +32,20 @@ namespace ams::kern {
     Type_Count,
 };
 private:
-    size_t bit_width;
+    size_t m_bit_width;
-    size_t address;
+    size_t m_address;
-    size_t size;
+    size_t m_size;
-    Type type;
+    Type m_type;
 public:
     static uintptr_t GetAddressSpaceStart(size_t width, Type type);
     static size_t GetAddressSpaceSize(size_t width, Type type);

-    constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : bit_width(bw), address(a), size(s), type(t) { /* ... */ }
+    constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : m_bit_width(bw), m_address(a), m_size(s), m_type(t) { /* ... */ }

-    constexpr size_t GetWidth() const { return this->bit_width; }
+    constexpr size_t GetWidth() const { return m_bit_width; }
-    constexpr size_t GetAddress() const { return this->address; }
+    constexpr size_t GetAddress() const { return m_address; }
-    constexpr size_t GetSize() const { return this->size; }
+    constexpr size_t GetSize() const { return m_size; }
-    constexpr Type GetType() const { return this->type; }
+    constexpr Type GetType() const { return m_type; }
 };

 }
@@ -23,38 +23,38 @@ namespace ams::kern {
 private:
     static constexpr u64 AllowedAffinityMask = (1ul << cpu::NumCores) - 1;
 private:
-    u64 mask;
+    u64 m_mask;
 private:
     static constexpr ALWAYS_INLINE u64 GetCoreBit(s32 core) {
         MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
         return (1ul << core);
     }
 public:
-    constexpr ALWAYS_INLINE KAffinityMask() : mask(0) { MESOSPHERE_ASSERT_THIS(); }
+    constexpr ALWAYS_INLINE KAffinityMask() : m_mask(0) { MESOSPHERE_ASSERT_THIS(); }

-    constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return this->mask; }
+    constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return m_mask; }

     constexpr ALWAYS_INLINE void SetAffinityMask(u64 new_mask) {
         MESOSPHERE_ASSERT((new_mask & ~AllowedAffinityMask) == 0);
-        this->mask = new_mask;
+        m_mask = new_mask;
     }

     constexpr ALWAYS_INLINE bool GetAffinity(s32 core) const {
-        return this->mask & GetCoreBit(core);
+        return m_mask & GetCoreBit(core);
     }

     constexpr ALWAYS_INLINE void SetAffinity(s32 core, bool set) {
         MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));

         if (set) {
-            this->mask |= GetCoreBit(core);
+            m_mask |= GetCoreBit(core);
         } else {
-            this->mask &= ~GetCoreBit(core);
+            m_mask &= ~GetCoreBit(core);
         }
     }

     constexpr ALWAYS_INLINE void SetAll() {
-        this->mask = AllowedAffinityMask;
+        m_mask = AllowedAffinityMask;
     }
 };
@@ -46,13 +46,13 @@ namespace ams::kern {
 protected:
     class TypeObj {
     private:
-        const char *name;
+        const char *m_name;
-        ClassTokenType class_token;
+        ClassTokenType m_class_token;
     public:
-        constexpr explicit TypeObj(const char *n, ClassTokenType tok) : name(n), class_token(tok) { /* ... */ }
+        constexpr explicit TypeObj(const char *n, ClassTokenType tok) : m_name(n), m_class_token(tok) { /* ... */ }

-        constexpr ALWAYS_INLINE const char *GetName() const { return this->name; }
+        constexpr ALWAYS_INLINE const char *GetName() const { return m_name; }
-        constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return this->class_token; }
+        constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return m_class_token; }

         constexpr ALWAYS_INLINE bool operator==(const TypeObj &rhs) {
             return this->GetClassToken() == rhs.GetClassToken();

@@ -69,11 +69,11 @@ namespace ams::kern {
 private:
     MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
 private:
-    std::atomic<u32> ref_count;
+    std::atomic<u32> m_ref_count;
 public:
     static KAutoObject *Create(KAutoObject *ptr);
 public:
-    constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { MESOSPHERE_ASSERT_THIS(); }
+    constexpr ALWAYS_INLINE explicit KAutoObject() : m_ref_count(0) { MESOSPHERE_ASSERT_THIS(); }
     virtual ~KAutoObject() { MESOSPHERE_ASSERT_THIS(); }

     /* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. */

@@ -85,7 +85,7 @@ namespace ams::kern {
     virtual KProcess *GetOwner() const { return nullptr; }

     u32 GetReferenceCount() const {
-        return this->ref_count;
+        return m_ref_count;
     }

     ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const {

@@ -124,14 +124,14 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Atomically increment the reference count, only if it's positive. */
-        u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire);
+        u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
         do {
             if (AMS_UNLIKELY(cur_ref_count == 0)) {
                 MESOSPHERE_AUDIT(cur_ref_count != 0);
                 return false;
             }
             MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1);
-        } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed));
+        } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed));

         return true;
     }

@@ -140,10 +140,10 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Atomically decrement the reference count, not allowing it to become negative. */
-        u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire);
+        u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
         do {
             MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0);
-        } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed));
+        } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed));

         /* If ref count hits zero, destroy the object. */
         if (cur_ref_count - 1 == 0) {
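Note: Open() and Close() above are the standard increment-if-nonzero / decrement-and-destroy idiom. Because compare_exchange_weak reloads the observed count on failure, the zero check is re-evaluated on every retry, so a reference can never be taken on an object whose count has already dropped to zero. A standalone sketch of the acquire side (illustrative only, not code from this commit):

    #include <atomic>
    #include <cstdint>

    /* Illustrative increment-if-nonzero, mirroring the Open() loop above. */
    bool TryAddReference(std::atomic<uint32_t> &ref_count) {
        uint32_t cur = ref_count.load(std::memory_order_acquire);
        do {
            if (cur == 0) {
                return false; /* already dead: refuse to resurrect */
            }
            /* On CAS failure, cur is reloaded, so the zero check repeats. */
        } while (!ref_count.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
        return true;
    }

The MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1) in the real code is an overflow guard: it aborts if incrementing would wrap the u32 count.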
@@ -185,44 +185,44 @@ namespace ams::kern {
     template<typename U>
     friend class KScopedAutoObject;
 private:
-    T *obj;
+    T *m_obj;
 private:
     constexpr ALWAYS_INLINE void Swap(KScopedAutoObject &rhs) {
-        std::swap(this->obj, rhs.obj);
+        std::swap(m_obj, rhs.m_obj);
     }
 public:
-    constexpr ALWAYS_INLINE KScopedAutoObject() : obj(nullptr) { /* ... */ }
+    constexpr ALWAYS_INLINE KScopedAutoObject() : m_obj(nullptr) { /* ... */ }
-    constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) {
+    constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : m_obj(o) {
-        if (this->obj != nullptr) {
+        if (m_obj != nullptr) {
-            this->obj->Open();
+            m_obj->Open();
         }
     }

     ~KScopedAutoObject() {
-        if (this->obj != nullptr) {
+        if (m_obj != nullptr) {
-            this->obj->Close();
+            m_obj->Close();
         }
-        this->obj = nullptr;
+        m_obj = nullptr;
     }

     template<typename U> requires (std::derived_from<T, U> || std::derived_from<U, T>)
     constexpr KScopedAutoObject(KScopedAutoObject<U> &&rhs) {
         if constexpr (std::derived_from<U, T>) {
             /* Upcast. */
-            this->obj = rhs.obj;
+            m_obj = rhs.m_obj;
-            rhs.obj = nullptr;
+            rhs.m_obj = nullptr;
         } else {
             /* Downcast. */
             T *derived = nullptr;
-            if (rhs.obj != nullptr) {
+            if (rhs.m_obj != nullptr) {
-                derived = rhs.obj->template DynamicCast<T *>();
+                derived = rhs.m_obj->template DynamicCast<T *>();
                 if (derived == nullptr) {
-                    rhs.obj->Close();
+                    rhs.m_obj->Close();
                 }
             }

-            this->obj = derived;
+            m_obj = derived;
-            rhs.obj = nullptr;
+            rhs.m_obj = nullptr;
         }
     }

@@ -231,19 +231,19 @@ namespace ams::kern {
         return *this;
     }

-    constexpr ALWAYS_INLINE T *operator->() { return this->obj; }
+    constexpr ALWAYS_INLINE T *operator->() { return m_obj; }
-    constexpr ALWAYS_INLINE T &operator*() { return *this->obj; }
+    constexpr ALWAYS_INLINE T &operator*() { return *m_obj; }

     constexpr ALWAYS_INLINE void Reset(T *o) {
         KScopedAutoObject(o).Swap(*this);
     }

-    constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return this->obj; }
+    constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return m_obj; }

-    constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = this->obj; this->obj = nullptr; return ret; }
+    constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = m_obj; m_obj = nullptr; return ret; }

-    constexpr ALWAYS_INLINE bool IsNull() const { return this->obj == nullptr; }
+    constexpr ALWAYS_INLINE bool IsNull() const { return m_obj == nullptr; }
-    constexpr ALWAYS_INLINE bool IsNotNull() const { return this->obj != nullptr; }
+    constexpr ALWAYS_INLINE bool IsNotNull() const { return m_obj != nullptr; }
 };
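Note: KScopedAutoObject is the RAII consumer of that reference counting — construction Opens, destruction Closes, and the move constructor transfers ownership, performing a DynamicCast on downcast and Closing the source reference if the cast fails. A hypothetical usage sketch (LookupProcess and DoWork are assumed helpers, not from this commit):

    /* Hypothetical usage: the reference is held exactly as long as the scope. */
    void WithProcess(u64 process_id) {
        /* Constructor Open()s the object, keeping it alive for this scope. */
        KScopedAutoObject<KProcess> process = LookupProcess(process_id); /* assumed lookup helper */
        if (process.IsNotNull()) {
            DoWork(process.GetPointerUnsafe()); /* assumed worker function */
        }
        /* Destructor Close()s; the final Close() invokes Destroy(). */
    }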
@@ -28,30 +28,30 @@ namespace ams::kern {
 public:
     class ListAccessor : public KScopedLightLock {
     private:
-        ListType &list;
+        ListType &m_list;
     public:
-        explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->lock), list(container->object_list) { /* ... */ }
+        explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->m_lock), m_list(container->m_object_list) { /* ... */ }
-        explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.lock), list(container.object_list) { /* ... */ }
+        explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.m_lock), m_list(container.m_object_list) { /* ... */ }

         typename ListType::iterator begin() const {
-            return this->list.begin();
+            return m_list.begin();
         }

         typename ListType::iterator end() const {
-            return this->list.end();
+            return m_list.end();
         }

         typename ListType::iterator find(typename ListType::const_reference ref) const {
-            return this->list.find(ref);
+            return m_list.find(ref);
         }
     };

     friend class ListAccessor;
 private:
-    KLightLock lock;
+    KLightLock m_lock;
-    ListType object_list;
+    ListType m_object_list;
 public:
-    constexpr KAutoObjectWithListContainer() : lock(), object_list() { MESOSPHERE_ASSERT_THIS(); }
+    constexpr KAutoObjectWithListContainer() : m_lock(), m_object_list() { MESOSPHERE_ASSERT_THIS(); }

     void Initialize() { MESOSPHERE_ASSERT_THIS(); }
     void Finalize() { MESOSPHERE_ASSERT_THIS(); }
@@ -29,10 +29,10 @@ namespace ams::kern {
 private:
     /* NOTE: Official KBeta has size 0x88, corresponding to 0x58 bytes of fields. */
     /* TODO: Add these fields, if KBeta is ever instantiable in the NX kernel. */
-    util::IntrusiveListNode process_list_node;
+    util::IntrusiveListNode m_process_list_node;
 public:
     explicit KBeta()
-        : process_list_node()
+        : m_process_list_node()
     {
         /* ... */
     }
@@ -200,14 +200,14 @@ namespace ams::kern {
     CapabilityFlag<CapabilityType::HandleTable> |
     CapabilityFlag<CapabilityType::DebugFlags>;
 private:
-    u8 svc_access_flags[SvcFlagCount]{};
+    u8 m_svc_access_flags[SvcFlagCount]{};
-    u8 irq_access_flags[IrqFlagCount]{};
+    u8 m_irq_access_flags[IrqFlagCount]{};
-    u64 core_mask{};
+    u64 m_core_mask{};
-    u64 priority_mask{};
+    u64 m_priority_mask{};
-    util::BitPack32 debug_capabilities{0};
+    util::BitPack32 m_debug_capabilities{0};
-    s32 handle_table_size{};
+    s32 m_handle_table_size{};
-    util::BitPack32 intended_kernel_version{0};
+    util::BitPack32 m_intended_kernel_version{0};
-    u32 program_type{};
+    u32 m_program_type{};
 private:
     static constexpr ALWAYS_INLINE void SetSvcAllowedImpl(u8 *data, u32 id) {
         constexpr size_t BitsPerWord = BITSIZEOF(*data);

@@ -228,8 +228,8 @@ namespace ams::kern {
     }

     bool SetSvcAllowed(u32 id) {
-        if (id < BITSIZEOF(this->svc_access_flags)) {
+        if (id < BITSIZEOF(m_svc_access_flags)) {
-            SetSvcAllowedImpl(this->svc_access_flags, id);
+            SetSvcAllowedImpl(m_svc_access_flags, id);
             return true;
         } else {
             return false;

@@ -237,9 +237,9 @@ namespace ams::kern {
     }

     bool SetInterruptPermitted(u32 id) {
-        constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
+        constexpr size_t BitsPerWord = BITSIZEOF(m_irq_access_flags[0]);
-        if (id < BITSIZEOF(this->irq_access_flags)) {
+        if (id < BITSIZEOF(m_irq_access_flags)) {
-            this->irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
+            m_irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
             return true;
         } else {
             return false;

@@ -266,14 +266,14 @@ namespace ams::kern {
     Result Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table);
     Result Initialize(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table);

-    constexpr u64 GetCoreMask() const { return this->core_mask; }
+    constexpr u64 GetCoreMask() const { return m_core_mask; }
-    constexpr u64 GetPriorityMask() const { return this->priority_mask; }
+    constexpr u64 GetPriorityMask() const { return m_priority_mask; }
-    constexpr s32 GetHandleTableSize() const { return this->handle_table_size; }
+    constexpr s32 GetHandleTableSize() const { return m_handle_table_size; }

     ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const {
-        static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
+        static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));
         /* Copy permissions. */
-        std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
+        std::memcpy(sp.svc_permission, m_svc_access_flags, sizeof(m_svc_access_flags));

         /* Clear specific SVCs based on our state. */
         ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

@@ -284,9 +284,9 @@ namespace ams::kern {
     }

     ALWAYS_INLINE void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
-        static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
+        static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));
         /* Clear all permissions. */
-        std::memset(sp.svc_permission, 0, sizeof(this->svc_access_flags));
+        std::memset(sp.svc_permission, 0, sizeof(m_svc_access_flags));

         /* Set specific SVCs based on our state. */
         SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);

@@ -297,12 +297,12 @@ namespace ams::kern {
     }

     ALWAYS_INLINE void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
-        static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
+        static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));
         /* Get whether we have access to return from exception. */
         const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

         /* Copy permissions. */
-        std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
+        std::memcpy(sp.svc_permission, m_svc_access_flags, sizeof(m_svc_access_flags));

         /* Clear/Set specific SVCs based on our state. */
         ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

@@ -313,21 +313,21 @@ namespace ams::kern {
     }

     ALWAYS_INLINE void CopyEnterExceptionSvcPermissionsTo(KThread::StackParameters &sp) {
-        static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
+        static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));

         /* Set ReturnFromException if allowed. */
-        if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_ReturnFromException)) {
+        if (GetSvcAllowedImpl(m_svc_access_flags, svc::SvcId_ReturnFromException)) {
             SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
         }

         /* Set GetInfo if allowed. */
-        if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_GetInfo)) {
+        if (GetSvcAllowedImpl(m_svc_access_flags, svc::SvcId_GetInfo)) {
             SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
         }
     }

     ALWAYS_INLINE void CopyLeaveExceptionSvcPermissionsTo(KThread::StackParameters &sp) {
-        static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
+        static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));

         /* Clear ReturnFromException. */
         ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

@@ -339,24 +339,24 @@ namespace ams::kern {
     }

     constexpr bool IsPermittedInterrupt(u32 id) const {
-        constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
+        constexpr size_t BitsPerWord = BITSIZEOF(m_irq_access_flags[0]);
-        if (id < BITSIZEOF(this->irq_access_flags)) {
+        if (id < BITSIZEOF(m_irq_access_flags)) {
-            return (this->irq_access_flags[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0;
+            return (m_irq_access_flags[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0;
         } else {
             return false;
         }
     }

     constexpr bool IsPermittedDebug() const {
-        return this->debug_capabilities.Get<DebugFlags::AllowDebug>();
+        return m_debug_capabilities.Get<DebugFlags::AllowDebug>();
     }

     constexpr bool CanForceDebug() const {
-        return this->debug_capabilities.Get<DebugFlags::ForceDebug>();
+        return m_debug_capabilities.Get<DebugFlags::ForceDebug>();
     }

-    constexpr u32 GetIntendedKernelMajorVersion() const { return this->intended_kernel_version.Get<KernelVersion::MajorVersion>(); }
+    constexpr u32 GetIntendedKernelMajorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MajorVersion>(); }
-    constexpr u32 GetIntendedKernelMinorVersion() const { return this->intended_kernel_version.Get<KernelVersion::MinorVersion>(); }
+    constexpr u32 GetIntendedKernelMinorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MinorVersion>(); }
     constexpr u32 GetIntendedKernelVersion() const { return ams::svc::EncodeKernelVersion(this->GetIntendedKernelMajorVersion(), this->GetIntendedKernelMinorVersion()); }
 };
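Note: the SVC and IRQ capability tables above are flat bit arrays — a permission id maps to byte id / BitsPerWord and bit id % BitsPerWord, and out-of-range ids are treated as not permitted. A standalone sketch of that indexing (illustrative only, not code from this commit):

    #include <cstddef>
    #include <cstdint>

    /* Illustrative bit-array permission table (id / BitsPerWord selects the
       byte, id % BitsPerWord selects the bit within it). */
    constexpr size_t BitsPerWord = 8 * sizeof(uint8_t);

    constexpr bool IsBitSet(const uint8_t *flags, size_t num_bits, uint32_t id) {
        if (id >= num_bits) {
            return false; /* out-of-range ids are simply not permitted */
        }
        return (flags[id / BitsPerWord] & (1u << (id % BitsPerWord))) != 0;
    }

    constexpr void SetBit(uint8_t *flags, size_t num_bits, uint32_t id) {
        if (id < num_bits) {
            flags[id / BitsPerWord] |= (1u << (id % BitsPerWord));
        }
    }

With one bit per SVC or interrupt id, copying a thread's entire permission set is a single memcpy, which is exactly what the CopySvcPermissionsTo family above relies on.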
@@ -28,23 +28,23 @@ namespace ams::kern {
 class KClientPort final : public KSynchronizationObject {
     MESOSPHERE_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
 private:
-    std::atomic<s32> num_sessions;
+    std::atomic<s32> m_num_sessions;
-    std::atomic<s32> peak_sessions;
+    std::atomic<s32> m_peak_sessions;
-    s32 max_sessions;
+    s32 m_max_sessions;
-    KPort *parent;
+    KPort *m_parent;
 public:
-    constexpr KClientPort() : num_sessions(), peak_sessions(), max_sessions(), parent() { /* ... */ }
+    constexpr KClientPort() : m_num_sessions(), m_peak_sessions(), m_max_sessions(), m_parent() { /* ... */ }
     virtual ~KClientPort() { /* ... */ }

     void Initialize(KPort *parent, s32 max_sessions);
     void OnSessionFinalized();
     void OnServerClosed();

-    constexpr const KPort *GetParent() const { return this->parent; }
+    constexpr const KPort *GetParent() const { return m_parent; }

-    ALWAYS_INLINE s32 GetNumSessions() const { return this->num_sessions; }
+    ALWAYS_INLINE s32 GetNumSessions() const { return m_num_sessions; }
-    ALWAYS_INLINE s32 GetPeakSessions() const { return this->peak_sessions; }
+    ALWAYS_INLINE s32 GetPeakSessions() const { return m_peak_sessions; }
-    ALWAYS_INLINE s32 GetMaxSessions() const { return this->max_sessions; }
+    ALWAYS_INLINE s32 GetMaxSessions() const { return m_max_sessions; }

     bool IsLight() const;
@@ -24,20 +24,20 @@ namespace ams::kern {
 class KClientSession final : public KAutoObjectWithSlabHeapAndContainer<KClientSession, KAutoObjectWithList> {
     MESOSPHERE_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
 private:
-    KSession *parent;
+    KSession *m_parent;
 public:
-    constexpr KClientSession() : parent() { /* ... */ }
+    constexpr KClientSession() : m_parent() { /* ... */ }
     virtual ~KClientSession() { /* ... */ }

     void Initialize(KSession *parent) {
         /* Set member variables. */
-        this->parent = parent;
+        m_parent = parent;
     }

     virtual void Destroy() override;
     static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

-    constexpr KSession *GetParent() const { return this->parent; }
+    constexpr KSession *GetParent() const { return m_parent; }

     Result SendSyncRequest(uintptr_t address, size_t size);
     Result SendAsyncRequest(KWritableEvent *event, uintptr_t address, size_t size);
@@ -23,15 +23,15 @@ namespace ams::kern {
 class KCodeMemory final : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> {
     MESOSPHERE_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
 private:
-    TYPED_STORAGE(KPageGroup) page_group;
+    TYPED_STORAGE(KPageGroup) m_page_group;
-    KProcess *owner;
+    KProcess *m_owner;
-    KProcessAddress address;
+    KProcessAddress m_address;
-    KLightLock lock;
+    KLightLock m_lock;
-    bool is_initialized;
+    bool m_is_initialized;
-    bool is_owner_mapped;
+    bool m_is_owner_mapped;
-    bool is_mapped;
+    bool m_is_mapped;
 public:
-    explicit KCodeMemory() : owner(nullptr), address(Null<KProcessAddress>), is_initialized(false), is_owner_mapped(false), is_mapped(false) {
+    explicit KCodeMemory() : m_owner(nullptr), m_address(Null<KProcessAddress>), m_is_initialized(false), m_is_owner_mapped(false), m_is_mapped(false) {
         /* ... */
     }

@@ -45,12 +45,12 @@ namespace ams::kern {
     Result MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm);
     Result UnmapFromOwner(KProcessAddress address, size_t size);

-    virtual bool IsInitialized() const override { return this->is_initialized; }
+    virtual bool IsInitialized() const override { return m_is_initialized; }
     static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

-    KProcess *GetOwner() const { return this->owner; }
+    KProcess *GetOwner() const { return m_owner; }
-    KProcessAddress GetSourceAddress() { return this->address; }
+    KProcessAddress GetSourceAddress() { return m_address; }
-    size_t GetSize() const { return this->is_initialized ? GetReference(this->page_group).GetNumPages() * PageSize : 0; }
+    size_t GetSize() const { return m_is_initialized ? GetReference(m_page_group).GetNumPages() * PageSize : 0; }
 };

 }
@ -24,9 +24,9 @@ namespace ams::kern {
|
||||||
public:
|
public:
|
||||||
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
|
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
|
||||||
private:
|
private:
|
||||||
ThreadTree tree;
|
ThreadTree m_tree;
|
||||||
public:
|
public:
|
||||||
constexpr KConditionVariable() : tree() { /* ... */ }
|
constexpr KConditionVariable() : m_tree() { /* ... */ }
|
||||||
|
|
||||||
/* Arbitration. */
|
/* Arbitration. */
|
||||||
Result SignalToAddress(KProcessAddress addr);
|
Result SignalToAddress(KProcessAddress addr);
|
||||||
|
|
|
@ -26,11 +26,11 @@ namespace ams::kern {
|
||||||
protected:
|
protected:
|
||||||
using DebugEventList = util::IntrusiveListBaseTraits<KEventInfo>::ListType;
|
using DebugEventList = util::IntrusiveListBaseTraits<KEventInfo>::ListType;
|
||||||
private:
|
private:
|
||||||
DebugEventList event_info_list;
|
DebugEventList m_event_info_list;
|
||||||
u32 continue_flags;
|
u32 m_continue_flags;
|
||||||
KProcess *process;
|
KProcess *m_process;
|
||||||
KLightLock lock;
|
KLightLock m_lock;
|
||||||
KProcess::State old_process_state;
|
KProcess::State m_old_process_state;
|
||||||
public:
|
public:
|
||||||
explicit KDebugBase() { /* ... */ }
|
explicit KDebugBase() { /* ... */ }
|
||||||
virtual ~KDebugBase() { /* ... */ }
|
virtual ~KDebugBase() { /* ... */ }
|
||||||
|
|
|
@ -24,19 +24,19 @@ namespace ams::kern {
|
||||||
class KDeviceAddressSpace final : public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> {
|
class KDeviceAddressSpace final : public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> {
|
||||||
MESOSPHERE_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject);
|
MESOSPHERE_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject);
|
||||||
private:
|
private:
|
||||||
KLightLock lock;
|
KLightLock m_lock;
|
||||||
KDevicePageTable table;
|
KDevicePageTable m_table;
|
||||||
u64 space_address;
|
u64 m_space_address;
|
||||||
u64 space_size;
|
u64 m_space_size;
|
||||||
bool is_initialized;
|
bool m_is_initialized;
|
||||||
public:
|
public:
|
||||||
constexpr KDeviceAddressSpace() : lock(), table(), space_address(), space_size(), is_initialized() { /* ... */ }
|
constexpr KDeviceAddressSpace() : m_lock(), m_table(), m_space_address(), m_space_size(), m_is_initialized() { /* ... */ }
|
||||||
virtual ~KDeviceAddressSpace() { /* ... */ }
|
virtual ~KDeviceAddressSpace() { /* ... */ }
|
||||||
|
|
||||||
Result Initialize(u64 address, u64 size);
|
Result Initialize(u64 address, u64 size);
|
||||||
virtual void Finalize() override;
|
virtual void Finalize() override;
|
||||||
|
|
||||||
virtual bool IsInitialized() const override { return this->is_initialized; }
|
virtual bool IsInitialized() const override { return m_is_initialized; }
|
||||||
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
|
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
|
||||||
|
|
||||||
Result Attach(ams::svc::DeviceName device_name);
|
Result Attach(ams::svc::DeviceName device_name);
|
||||||
|
|
|
@ -28,19 +28,19 @@ namespace ams::kern {
|
||||||
public:
|
public:
|
||||||
class PageBuffer {
|
class PageBuffer {
|
||||||
private:
|
private:
|
||||||
u8 buffer[PageSize];
|
u8 m_buffer[PageSize];
|
||||||
};
|
};
|
||||||
static_assert(sizeof(PageBuffer) == PageSize);
|
static_assert(sizeof(PageBuffer) == PageSize);
|
||||||
private:
|
private:
|
||||||
KSpinLock lock;
|
KSpinLock m_lock;
|
||||||
KPageBitmap page_bitmap;
|
KPageBitmap m_page_bitmap;
|
||||||
size_t used;
|
size_t m_used;
|
||||||
size_t peak;
|
size_t m_peak;
|
||||||
size_t count;
|
size_t m_count;
|
||||||
KVirtualAddress address;
|
KVirtualAddress m_address;
|
||||||
size_t size;
|
size_t m_size;
|
||||||
public:
|
public:
|
||||||
KDynamicPageManager() : lock(), page_bitmap(), used(), peak(), count(), address(), size() { /* ... */ }
|
KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(), m_size() { /* ... */ }
|
||||||
|
|
||||||
Result Initialize(KVirtualAddress memory, size_t sz) {
|
Result Initialize(KVirtualAddress memory, size_t sz) {
|
||||||
/* We need to have positive size. */
|
/* We need to have positive size. */
|
||||||
|
@ -51,40 +51,40 @@ namespace ams::kern {
|
||||||
const size_t allocatable_size = sz - management_size;
|
const size_t allocatable_size = sz - management_size;
|
||||||
|
|
||||||
/* Set tracking fields. */
|
/* Set tracking fields. */
|
||||||
this->address = memory;
|
m_address = memory;
|
||||||
this->size = util::AlignDown(allocatable_size, sizeof(PageBuffer));
|
m_size = util::AlignDown(allocatable_size, sizeof(PageBuffer));
|
||||||
this->count = allocatable_size / sizeof(PageBuffer);
|
m_count = allocatable_size / sizeof(PageBuffer);
|
||||||
R_UNLESS(this->count > 0, svc::ResultOutOfMemory());
|
R_UNLESS(m_count > 0, svc::ResultOutOfMemory());
|
||||||
|
|
||||||
/* Clear the management region. */
|
/* Clear the management region. */
|
||||||
u64 *management_ptr = GetPointer<u64>(this->address + allocatable_size);
|
u64 *management_ptr = GetPointer<u64>(m_address + allocatable_size);
|
||||||
std::memset(management_ptr, 0, management_size);
|
std::memset(management_ptr, 0, management_size);
|
||||||
|
|
||||||
/* Initialize the bitmap. */
|
/* Initialize the bitmap. */
|
||||||
this->page_bitmap.Initialize(management_ptr, this->count);
|
m_page_bitmap.Initialize(management_ptr, m_count);
|
||||||
|
|
||||||
/* Free the pages to the bitmap. */
|
/* Free the pages to the bitmap. */
|
||||||
std::memset(GetPointer<PageBuffer>(this->address), 0, this->count * sizeof(PageBuffer));
|
std::memset(GetPointer<PageBuffer>(m_address), 0, m_count * sizeof(PageBuffer));
|
||||||
for (size_t i = 0; i < this->count; i++) {
|
for (size_t i = 0; i < m_count; i++) {
|
||||||
this->page_bitmap.SetBit(i);
|
m_page_bitmap.SetBit(i);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ResultSuccess();
|
return ResultSuccess();
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr KVirtualAddress GetAddress() const { return this->address; }
|
constexpr KVirtualAddress GetAddress() const { return m_address; }
|
||||||
constexpr size_t GetSize() const { return this->size; }
|
constexpr size_t GetSize() const { return m_size; }
|
||||||
constexpr size_t GetUsed() const { return this->used; }
|
constexpr size_t GetUsed() const { return m_used; }
|
||||||
constexpr size_t GetPeak() const { return this->peak; }
|
constexpr size_t GetPeak() const { return m_peak; }
|
||||||
constexpr size_t GetCount() const { return this->count; }
|
constexpr size_t GetCount() const { return m_count; }
|
||||||
|
|
||||||
PageBuffer *Allocate() {
|
PageBuffer *Allocate() {
|
||||||
/* Take the lock. */
|
/* Take the lock. */
|
||||||
KScopedInterruptDisable di;
|
KScopedInterruptDisable di;
|
||||||
KScopedSpinLock lk(this->lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
/* Find a random free block. */
|
/* Find a random free block. */
|
||||||
ssize_t soffset = this->page_bitmap.FindFreeBlock(true);
|
ssize_t soffset = m_page_bitmap.FindFreeBlock(true);
|
||||||
if (AMS_UNLIKELY(soffset < 0)) {
|
if (AMS_UNLIKELY(soffset < 0)) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
@ -92,23 +92,23 @@ namespace ams::kern {
|
||||||
const size_t offset = static_cast<size_t>(soffset);
|
const size_t offset = static_cast<size_t>(soffset);
|
||||||
|
|
||||||
/* Update our tracking. */
|
/* Update our tracking. */
|
||||||
this->page_bitmap.ClearBit(offset);
|
m_page_bitmap.ClearBit(offset);
|
||||||
this->peak = std::max(this->peak, (++this->used));
|
m_peak = std::max(m_peak, (++m_used));
|
||||||
|
|
||||||
return GetPointer<PageBuffer>(this->address) + offset;
|
return GetPointer<PageBuffer>(m_address) + offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Free(PageBuffer *pb) {
|
void Free(PageBuffer *pb) {
|
||||||
/* Take the lock. */
|
/* Take the lock. */
|
||||||
KScopedInterruptDisable di;
|
KScopedInterruptDisable di;
|
||||||
KScopedSpinLock lk(this->lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
/* Set the bit for the free page. */
|
/* Set the bit for the free page. */
|
||||||
size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(this->address)) / sizeof(PageBuffer);
|
size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_address)) / sizeof(PageBuffer);
|
||||||
this->page_bitmap.SetBit(offset);
|
m_page_bitmap.SetBit(offset);
|
||||||
|
|
||||||
/* Decrement our used count. */
|
/* Decrement our used count. */
|
||||||
--this->used;
|
--m_used;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -30,28 +30,28 @@ namespace ams::kern {
|
||||||
using Impl = impl::KSlabHeapImpl;
|
using Impl = impl::KSlabHeapImpl;
|
||||||
using PageBuffer = KDynamicPageManager::PageBuffer;
|
using PageBuffer = KDynamicPageManager::PageBuffer;
|
||||||
private:
|
private:
|
||||||
Impl impl;
|
Impl m_impl;
|
||||||
KDynamicPageManager *page_allocator;
|
KDynamicPageManager *m_page_allocator;
|
||||||
std::atomic<size_t> used;
|
std::atomic<size_t> m_used;
|
||||||
std::atomic<size_t> peak;
|
std::atomic<size_t> m_peak;
|
||||||
std::atomic<size_t> count;
|
std::atomic<size_t> m_count;
|
||||||
KVirtualAddress address;
|
KVirtualAddress m_address;
|
||||||
size_t size;
|
size_t m_size;
|
||||||
private:
|
private:
|
||||||
ALWAYS_INLINE Impl *GetImpl() {
|
ALWAYS_INLINE Impl *GetImpl() {
|
||||||
return std::addressof(this->impl);
|
return std::addressof(m_impl);
|
||||||
}
|
}
|
||||||
ALWAYS_INLINE const Impl *GetImpl() const {
|
ALWAYS_INLINE const Impl *GetImpl() const {
|
||||||
return std::addressof(this->impl);
|
return std::addressof(m_impl);
|
||||||
}
|
}
|
||||||
public:
|
public:
|
||||||
constexpr KDynamicSlabHeap() : impl(), page_allocator(), used(), peak(), count(), address(), size() { /* ... */ }
|
constexpr KDynamicSlabHeap() : m_impl(), m_page_allocator(), m_used(), m_peak(), m_count(), m_address(), m_size() { /* ... */ }
|
||||||
|
|
||||||
constexpr KVirtualAddress GetAddress() const { return this->address; }
|
constexpr KVirtualAddress GetAddress() const { return m_address; }
|
||||||
constexpr size_t GetSize() const { return this->size; }
|
constexpr size_t GetSize() const { return m_size; }
|
||||||
constexpr size_t GetUsed() const { return this->used; }
|
constexpr size_t GetUsed() const { return m_used; }
|
||||||
constexpr size_t GetPeak() const { return this->peak; }
|
constexpr size_t GetPeak() const { return m_peak; }
|
||||||
constexpr size_t GetCount() const { return this->count; }
|
constexpr size_t GetCount() const { return m_count; }
|
||||||
|
|
||||||
constexpr bool IsInRange(KVirtualAddress addr) const {
|
constexpr bool IsInRange(KVirtualAddress addr) const {
|
||||||
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
|
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
|
||||||
|
@ -59,22 +59,22 @@ namespace ams::kern {
|
||||||
|
|
||||||
void Initialize(KVirtualAddress memory, size_t sz) {
|
void Initialize(KVirtualAddress memory, size_t sz) {
|
||||||
/* Set tracking fields. */
|
/* Set tracking fields. */
|
||||||
this->address = memory;
|
m_address = memory;
|
||||||
this->count = sz / sizeof(T);
|
m_count = sz / sizeof(T);
|
||||||
this->size = this->count * sizeof(T);
|
m_size = m_count * sizeof(T);
|
||||||
|
|
||||||
/* Free blocks to memory. */
|
/* Free blocks to memory. */
|
||||||
u8 *cur = GetPointer<u8>(this->address + this->size);
|
u8 *cur = GetPointer<u8>(m_address + m_size);
|
||||||
for (size_t i = 0; i < this->count; i++) {
|
for (size_t i = 0; i < m_count; i++) {
|
||||||
cur -= sizeof(T);
|
cur -= sizeof(T);
|
||||||
this->GetImpl()->Free(cur);
|
this->GetImpl()->Free(cur);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void Initialize(KDynamicPageManager *page_allocator) {
|
void Initialize(KDynamicPageManager *page_allocator) {
|
||||||
this->page_allocator = page_allocator;
|
m_page_allocator = page_allocator;
|
||||||
this->address = this->page_allocator->GetAddress();
|
m_address = m_page_allocator->GetAddress();
|
||||||
this->size = this->page_allocator->GetSize();
|
m_size = m_page_allocator->GetSize();
|
||||||
}
|
}
|
||||||
|
|
||||||
void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) {
|
void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) {
|
||||||
|
@ -84,13 +84,13 @@ namespace ams::kern {
|
||||||
this->Initialize(page_allocator);
|
this->Initialize(page_allocator);
|
||||||
|
|
||||||
/* Allocate until we have the correct number of objects. */
|
/* Allocate until we have the correct number of objects. */
|
||||||
while (this->count < num_objects) {
|
while (m_count < num_objects) {
|
||||||
auto *allocated = reinterpret_cast<T *>(this->page_allocator->Allocate());
|
auto *allocated = reinterpret_cast<T *>(m_page_allocator->Allocate());
|
||||||
MESOSPHERE_ABORT_UNLESS(allocated != nullptr);
|
MESOSPHERE_ABORT_UNLESS(allocated != nullptr);
|
||||||
for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
|
for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
|
||||||
this->GetImpl()->Free(allocated + i);
|
this->GetImpl()->Free(allocated + i);
|
||||||
}
|
}
|
||||||
this->count += sizeof(PageBuffer) / sizeof(T);
|
m_count += sizeof(PageBuffer) / sizeof(T);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -99,14 +99,14 @@ namespace ams::kern {
|
||||||
|
|
||||||
/* If we fail to allocate, try to get a new page from our next allocator. */
|
/* If we fail to allocate, try to get a new page from our next allocator. */
|
||||||
if (AMS_UNLIKELY(allocated == nullptr)) {
|
if (AMS_UNLIKELY(allocated == nullptr)) {
|
||||||
if (this->page_allocator != nullptr) {
|
if (m_page_allocator != nullptr) {
|
||||||
allocated = reinterpret_cast<T *>(this->page_allocator->Allocate());
|
allocated = reinterpret_cast<T *>(m_page_allocator->Allocate());
|
||||||
if (allocated != nullptr) {
|
if (allocated != nullptr) {
|
||||||
/* If we succeeded in getting a page, free the rest to our slab. */
|
/* If we succeeded in getting a page, free the rest to our slab. */
|
||||||
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
|
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
|
||||||
this->GetImpl()->Free(allocated + i);
|
this->GetImpl()->Free(allocated + i);
|
||||||
}
|
}
|
||||||
this->count += sizeof(PageBuffer) / sizeof(T);
|
m_count += sizeof(PageBuffer) / sizeof(T);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -116,10 +116,10 @@ namespace ams::kern {
|
||||||
new (allocated) T();
|
new (allocated) T();
|
||||||
|
|
||||||
/* Update our tracking. */
|
/* Update our tracking. */
|
||||||
size_t used = ++this->used;
|
size_t used = ++m_used;
|
||||||
size_t peak = this->peak;
|
size_t peak = m_peak;
|
||||||
while (peak < used) {
|
while (peak < used) {
|
||||||
if (this->peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
|
if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -130,7 +130,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
void Free(T *t) {
|
void Free(T *t) {
|
||||||
this->GetImpl()->Free(t);
|
this->GetImpl()->Free(t);
|
||||||
--this->used;
|
--m_used;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -25,13 +25,13 @@ namespace ams::kern {
|
||||||
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
|
class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
|
||||||
MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
|
MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
|
||||||
private:
|
private:
|
||||||
KReadableEvent readable_event;
|
KReadableEvent m_readable_event;
|
||||||
KWritableEvent writable_event;
|
KWritableEvent m_writable_event;
|
||||||
KProcess *owner;
|
KProcess *m_owner;
|
||||||
bool initialized;
|
bool m_initialized;
|
||||||
public:
|
public:
|
||||||
constexpr KEvent()
|
constexpr KEvent()
|
||||||
: readable_event(), writable_event(), owner(), initialized()
|
: m_readable_event(), m_writable_event(), m_owner(), m_initialized()
|
||||||
{
|
{
|
||||||
/* ... */
|
/* ... */
|
||||||
}
|
}
|
||||||
|
@ -41,15 +41,15 @@ namespace ams::kern {
|
||||||
void Initialize();
|
void Initialize();
|
||||||
virtual void Finalize() override;
|
virtual void Finalize() override;
|
||||||
|
|
||||||
virtual bool IsInitialized() const override { return this->initialized; }
|
virtual bool IsInitialized() const override { return m_initialized; }
|
||||||
virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->owner); }
|
virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_owner); }
|
||||||
|
|
||||||
static void PostDestroy(uintptr_t arg);
|
static void PostDestroy(uintptr_t arg);
|
||||||
|
|
||||||
virtual KProcess *GetOwner() const override { return this->owner; }
|
virtual KProcess *GetOwner() const override { return m_owner; }
|
||||||
|
|
||||||
KReadableEvent &GetReadableEvent() { return this->readable_event; }
|
KReadableEvent &GetReadableEvent() { return m_readable_event; }
|
||||||
KWritableEvent &GetWritableEvent() { return this->writable_event; }
|
KWritableEvent &GetWritableEvent() { return m_writable_event; }
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -61,38 +61,38 @@ namespace ams::kern {
|
||||||
u16 type;
|
u16 type;
|
||||||
} info;
|
} info;
|
||||||
Entry *next_free_entry;
|
Entry *next_free_entry;
|
||||||
} meta;
|
} m_meta;
|
||||||
KAutoObject *object;
|
KAutoObject *m_object;
|
||||||
public:
|
public:
|
||||||
constexpr Entry() : meta(), object(nullptr) { /* ... */ }
|
constexpr Entry() : m_meta(), m_object(nullptr) { /* ... */ }
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE void SetFree(Entry *next) {
|
constexpr ALWAYS_INLINE void SetFree(Entry *next) {
|
||||||
this->object = nullptr;
|
m_object = nullptr;
|
||||||
this->meta.next_free_entry = next;
|
m_meta.next_free_entry = next;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE void SetUsed(KAutoObject *obj, u16 linear_id, u16 type) {
|
constexpr ALWAYS_INLINE void SetUsed(KAutoObject *obj, u16 linear_id, u16 type) {
|
||||||
this->object = obj;
|
m_object = obj;
|
||||||
this->meta.info = { linear_id, type };
|
m_meta.info = { linear_id, type };
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return this->object; }
|
constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return m_object; }
|
||||||
constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return this->meta.next_free_entry; }
|
constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return m_meta.next_free_entry; }
|
||||||
constexpr ALWAYS_INLINE u16 GetLinearId() const { return this->meta.info.linear_id; }
|
constexpr ALWAYS_INLINE u16 GetLinearId() const { return m_meta.info.linear_id; }
|
||||||
constexpr ALWAYS_INLINE u16 GetType() const { return this->meta.info.type; }
|
constexpr ALWAYS_INLINE u16 GetType() const { return m_meta.info.type; }
|
||||||
};
|
};
|
||||||
private:
|
private:
|
||||||
mutable KSpinLock lock;
|
mutable KSpinLock m_lock;
|
||||||
Entry *table;
|
Entry *m_table;
|
||||||
Entry *free_head;
|
Entry *m_free_head;
|
||||||
Entry entries[MaxTableSize];
|
Entry m_entries[MaxTableSize];
|
||||||
u16 table_size;
|
u16 m_table_size;
|
||||||
u16 max_count;
|
u16 m_max_count;
|
||||||
u16 next_linear_id;
|
u16 m_next_linear_id;
|
||||||
u16 count;
|
u16 m_count;
|
||||||
public:
|
public:
|
||||||
constexpr KHandleTable() :
|
constexpr KHandleTable() :
|
||||||
lock(), table(nullptr), free_head(nullptr), entries(), table_size(0), max_count(0), next_linear_id(MinLinearId), count(0)
|
m_lock(), m_table(nullptr), m_free_head(nullptr), m_entries(), m_table_size(0), m_max_count(0), m_next_linear_id(MinLinearId), m_count(0)
|
||||||
{ MESOSPHERE_ASSERT_THIS(); }
|
{ MESOSPHERE_ASSERT_THIS(); }
|
||||||
|
|
||||||
constexpr NOINLINE Result Initialize(s32 size) {
|
constexpr NOINLINE Result Initialize(s32 size) {
|
||||||
|
@ -101,26 +101,26 @@ namespace ams::kern {
|
||||||
R_UNLESS(size <= static_cast<s32>(MaxTableSize), svc::ResultOutOfMemory());
|
R_UNLESS(size <= static_cast<s32>(MaxTableSize), svc::ResultOutOfMemory());
|
||||||
|
|
||||||
/* Initialize all fields. */
|
/* Initialize all fields. */
|
||||||
this->table = this->entries;
|
m_table = m_entries;
|
||||||
this->table_size = (size <= 0) ? MaxTableSize : size;
|
m_table_size = (size <= 0) ? MaxTableSize : size;
|
||||||
this->next_linear_id = MinLinearId;
|
m_next_linear_id = MinLinearId;
|
||||||
this->count = 0;
|
m_count = 0;
|
||||||
this->max_count = 0;
|
m_max_count = 0;
|
||||||
|
|
||||||
/* Free all entries. */
|
/* Free all entries. */
|
||||||
for (size_t i = 0; i < static_cast<size_t>(this->table_size - 1); i++) {
|
for (size_t i = 0; i < static_cast<size_t>(m_table_size - 1); i++) {
|
||||||
this->entries[i].SetFree(std::addressof(this->entries[i + 1]));
|
m_entries[i].SetFree(std::addressof(m_entries[i + 1]));
|
||||||
}
|
}
|
||||||
this->entries[this->table_size - 1].SetFree(nullptr);
|
m_entries[m_table_size - 1].SetFree(nullptr);
|
||||||
|
|
||||||
this->free_head = std::addressof(this->entries[0]);
|
m_free_head = std::addressof(m_entries[0]);
|
||||||
|
|
||||||
return ResultSuccess();
|
return ResultSuccess();
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE size_t GetTableSize() const { return this->table_size; }
|
constexpr ALWAYS_INLINE size_t GetTableSize() const { return m_table_size; }
|
||||||
constexpr ALWAYS_INLINE size_t GetCount() const { return this->count; }
|
constexpr ALWAYS_INLINE size_t GetCount() const { return m_count; }
|
||||||
constexpr ALWAYS_INLINE size_t GetMaxCount() const { return this->max_count; }
|
constexpr ALWAYS_INLINE size_t GetMaxCount() const { return m_max_count; }
|
||||||
|
|
||||||
NOINLINE Result Finalize();
|
NOINLINE Result Finalize();
|
||||||
NOINLINE bool Remove(ams::svc::Handle handle);
|
NOINLINE bool Remove(ams::svc::Handle handle);
|
||||||
|
@ -129,7 +129,7 @@ namespace ams::kern {
|
||||||
ALWAYS_INLINE KScopedAutoObject<T> GetObjectWithoutPseudoHandle(ams::svc::Handle handle) const {
|
ALWAYS_INLINE KScopedAutoObject<T> GetObjectWithoutPseudoHandle(ams::svc::Handle handle) const {
|
||||||
/* Lock and look up in table. */
|
/* Lock and look up in table. */
|
||||||
KScopedDisableDispatch dd;
|
KScopedDisableDispatch dd;
|
||||||
KScopedSpinLock lk(this->lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
if constexpr (std::is_same<T, KAutoObject>::value) {
|
if constexpr (std::is_same<T, KAutoObject>::value) {
|
||||||
return this->GetObjectImpl(handle);
|
return this->GetObjectImpl(handle);
|
||||||
|
@ -163,7 +163,7 @@ namespace ams::kern {
|
||||||
KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(ams::svc::Handle handle) const {
|
KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(ams::svc::Handle handle) const {
|
||||||
/* Lock and look up in table. */
|
/* Lock and look up in table. */
|
||||||
KScopedDisableDispatch dd;
|
KScopedDisableDispatch dd;
|
||||||
KScopedSpinLock lk(this->lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
KAutoObject *obj = this->GetObjectImpl(handle);
|
KAutoObject *obj = this->GetObjectImpl(handle);
|
||||||
if (AMS_LIKELY(obj != nullptr)) {
|
if (AMS_LIKELY(obj != nullptr)) {
|
||||||
|
@ -190,7 +190,7 @@ namespace ams::kern {
|
||||||
ALWAYS_INLINE KScopedAutoObject<KAutoObject> GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const {
|
ALWAYS_INLINE KScopedAutoObject<KAutoObject> GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
KScopedDisableDispatch dd;
|
KScopedDisableDispatch dd;
|
||||||
KScopedSpinLock lk(this->lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
return this->GetObjectByIndexImpl(out_handle, index);
|
return this->GetObjectByIndexImpl(out_handle, index);
|
||||||
}
|
}
|
||||||
|
@ -217,7 +217,7 @@ namespace ams::kern {
|
||||||
{
|
{
|
||||||
/* Lock the table. */
|
/* Lock the table. */
|
||||||
KScopedDisableDispatch dd;
|
KScopedDisableDispatch dd;
|
||||||
KScopedSpinLock lk(this->lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
for (num_opened = 0; num_opened < num_handles; num_opened++) {
|
for (num_opened = 0; num_opened < num_handles; num_opened++) {
|
||||||
/* Get the current handle. */
|
/* Get the current handle. */
|
||||||
const auto cur_handle = handles[num_opened];
|
const auto cur_handle = handles[num_opened];
|
||||||
|
@ -258,38 +258,38 @@ namespace ams::kern {
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE Entry *AllocateEntry() {
|
constexpr ALWAYS_INLINE Entry *AllocateEntry() {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
MESOSPHERE_ASSERT(this->count < this->table_size);
|
MESOSPHERE_ASSERT(m_count < m_table_size);
|
||||||
|
|
||||||
Entry *entry = this->free_head;
|
Entry *entry = m_free_head;
|
||||||
this->free_head = entry->GetNextFreeEntry();
|
m_free_head = entry->GetNextFreeEntry();
|
||||||
|
|
||||||
this->count++;
|
m_count++;
|
||||||
this->max_count = std::max(this->max_count, this->count);
|
m_max_count = std::max(m_max_count, m_count);
|
||||||
|
|
||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE void FreeEntry(Entry *entry) {
|
constexpr ALWAYS_INLINE void FreeEntry(Entry *entry) {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
MESOSPHERE_ASSERT(this->count > 0);
|
MESOSPHERE_ASSERT(m_count > 0);
|
||||||
|
|
||||||
entry->SetFree(this->free_head);
|
entry->SetFree(m_free_head);
|
||||||
this->free_head = entry;
|
m_free_head = entry;
|
||||||
|
|
||||||
this->count--;
|
m_count--;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE u16 AllocateLinearId() {
|
constexpr ALWAYS_INLINE u16 AllocateLinearId() {
|
||||||
const u16 id = this->next_linear_id++;
|
const u16 id = m_next_linear_id++;
|
||||||
if (this->next_linear_id > MaxLinearId) {
|
if (m_next_linear_id > MaxLinearId) {
|
||||||
this->next_linear_id = MinLinearId;
|
m_next_linear_id = MinLinearId;
|
||||||
}
|
}
|
||||||
return id;
|
return id;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE size_t GetEntryIndex(Entry *entry) {
|
constexpr ALWAYS_INLINE size_t GetEntryIndex(Entry *entry) {
|
||||||
const size_t index = entry - this->table;
|
const size_t index = entry - m_table;
|
||||||
MESOSPHERE_ASSERT(index < this->table_size);
|
MESOSPHERE_ASSERT(index < m_table_size);
|
||||||
return index;
|
return index;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -311,12 +311,12 @@ namespace ams::kern {
|
||||||
if (linear_id == 0) {
|
if (linear_id == 0) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
if (index >= this->table_size) {
|
if (index >= m_table_size) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Get the entry, and ensure our serial id is correct. */
|
/* Get the entry, and ensure our serial id is correct. */
|
||||||
Entry *entry = std::addressof(this->table[index]);
|
Entry *entry = std::addressof(m_table[index]);
|
||||||
if (entry->GetObject() == nullptr) {
|
if (entry->GetObject() == nullptr) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
@ -346,12 +346,12 @@ namespace ams::kern {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
|
|
||||||
/* Index must be in bounds. */
|
/* Index must be in bounds. */
|
||||||
if (index >= this->table_size || this->table == nullptr) {
|
if (index >= m_table_size || m_table == nullptr) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Ensure entry has an object. */
|
/* Ensure entry has an object. */
|
||||||
Entry *entry = std::addressof(this->table[index]);
|
Entry *entry = std::addressof(m_table[index]);
|
||||||
if (entry->GetObject() == nullptr) {
|
if (entry->GetObject() == nullptr) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,41 +24,41 @@ namespace ams::kern {
|
||||||
private:
|
private:
|
||||||
using TimerTaskTree = util::IntrusiveRedBlackTreeBaseTraits<KTimerTask>::TreeType<KTimerTask>;
|
using TimerTaskTree = util::IntrusiveRedBlackTreeBaseTraits<KTimerTask>::TreeType<KTimerTask>;
|
||||||
private:
|
private:
|
||||||
KSpinLock lock;
|
KSpinLock m_lock;
|
||||||
TimerTaskTree task_tree;
|
TimerTaskTree m_task_tree;
|
||||||
KTimerTask *next_task;
|
KTimerTask *m_next_task;
|
||||||
public:
|
public:
|
||||||
constexpr ALWAYS_INLINE KHardwareTimerBase() : lock(), task_tree(), next_task(nullptr) { /* ... */ }
|
constexpr ALWAYS_INLINE KHardwareTimerBase() : m_lock(), m_task_tree(), m_next_task(nullptr) { /* ... */ }
|
||||||
private:
|
private:
|
||||||
ALWAYS_INLINE void RemoveTaskFromTree(KTimerTask *task) {
|
ALWAYS_INLINE void RemoveTaskFromTree(KTimerTask *task) {
|
||||||
/* Erase from the tree. */
|
/* Erase from the tree. */
|
||||||
auto it = this->task_tree.erase(this->task_tree.iterator_to(*task));
|
auto it = m_task_tree.erase(m_task_tree.iterator_to(*task));
|
||||||
|
|
||||||
/* Clear the task's scheduled time. */
|
/* Clear the task's scheduled time. */
|
||||||
task->SetTime(0);
|
task->SetTime(0);
|
||||||
|
|
||||||
/* Update our next task if relevant. */
|
/* Update our next task if relevant. */
|
||||||
if (this->next_task == task) {
|
if (m_next_task == task) {
|
||||||
this->next_task = (it != this->task_tree.end()) ? std::addressof(*it) : nullptr;
|
m_next_task = (it != m_task_tree.end()) ? std::addressof(*it) : nullptr;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
public:
|
public:
|
||||||
NOINLINE void CancelTask(KTimerTask *task) {
|
NOINLINE void CancelTask(KTimerTask *task) {
|
||||||
KScopedDisableDispatch dd;
|
KScopedDisableDispatch dd;
|
||||||
KScopedSpinLock lk(this->lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
if (const s64 task_time = task->GetTime(); task_time > 0) {
|
if (const s64 task_time = task->GetTime(); task_time > 0) {
|
||||||
this->RemoveTaskFromTree(task);
|
this->RemoveTaskFromTree(task);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
protected:
|
protected:
|
||||||
ALWAYS_INLINE KSpinLock &GetLock() { return this->lock; }
|
ALWAYS_INLINE KSpinLock &GetLock() { return m_lock; }
|
||||||
|
|
||||||
ALWAYS_INLINE s64 DoInterruptTaskImpl(s64 cur_time) {
|
ALWAYS_INLINE s64 DoInterruptTaskImpl(s64 cur_time) {
|
||||||
/* We want to handle all tasks, returning the next time that a task is scheduled. */
|
/* We want to handle all tasks, returning the next time that a task is scheduled. */
|
||||||
while (true) {
|
while (true) {
|
||||||
/* Get the next task. If there isn't one, return 0. */
|
/* Get the next task. If there isn't one, return 0. */
|
||||||
KTimerTask *task = this->next_task;
|
KTimerTask *task = m_next_task;
|
||||||
if (task == nullptr) {
|
if (task == nullptr) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -81,13 +81,13 @@ namespace ams::kern {
|
||||||
|
|
||||||
/* Set the task's time, and insert it into our tree. */
|
/* Set the task's time, and insert it into our tree. */
|
||||||
task->SetTime(task_time);
|
task->SetTime(task_time);
|
||||||
this->task_tree.insert(*task);
|
m_task_tree.insert(*task);
|
||||||
|
|
||||||
/* Update our next task if relevant. */
|
/* Update our next task if relevant. */
|
||||||
if (this->next_task != nullptr && this->next_task->GetTime() <= task_time) {
|
if (m_next_task != nullptr && m_next_task->GetTime() <= task_time) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
this->next_task = task;
|
m_next_task = task;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
|
@ -24,103 +24,103 @@ namespace ams::kern {
|
||||||
private:
|
private:
|
||||||
static constexpr u32 Magic = util::FourCC<'K','I','P','1'>::Code;
|
static constexpr u32 Magic = util::FourCC<'K','I','P','1'>::Code;
|
||||||
private:
|
private:
|
||||||
u32 magic;
|
u32 m_magic;
|
||||||
u8 name[12];
|
u8 m_name[12];
|
||||||
u64 program_id;
|
u64 m_program_id;
|
||||||
u32 version;
|
u32 m_version;
|
||||||
u8 priority;
|
u8 m_priority;
|
||||||
u8 ideal_core_id;
|
u8 m_ideal_core_id;
|
||||||
u8 _1E;
|
u8 m_1E;
|
||||||
u8 flags;
|
u8 m_flags;
|
||||||
u32 rx_address;
|
u32 m_rx_address;
|
||||||
u32 rx_size;
|
u32 m_rx_size;
|
||||||
u32 rx_compressed_size;
|
u32 m_rx_compressed_size;
|
||||||
u32 affinity_mask;
|
u32 m_affinity_mask;
|
||||||
u32 ro_address;
|
u32 m_ro_address;
|
||||||
u32 ro_size;
|
u32 m_ro_size;
|
||||||
u32 ro_compressed_size;
|
u32 m_ro_compressed_size;
|
||||||
u32 stack_size;
|
u32 m_stack_size;
|
||||||
u32 rw_address;
|
u32 m_rw_address;
|
||||||
u32 rw_size;
|
u32 m_rw_size;
|
||||||
u32 rw_compressed_size;
|
u32 m_rw_compressed_size;
|
||||||
u32 _4C;
|
u32 m_4C;
|
||||||
u32 bss_address;
|
u32 m_bss_address;
|
||||||
u32 bss_size;
|
u32 m_bss_size;
|
||||||
u32 pad[(0x80 - 0x58) / sizeof(u32)];
|
u32 m_pad[(0x80 - 0x58) / sizeof(u32)];
|
||||||
u32 capabilities[0x80 / sizeof(u32)];
|
u32 m_capabilities[0x80 / sizeof(u32)];
|
||||||
public:
|
public:
|
||||||
constexpr bool IsValid() const { return this->magic == Magic; }
|
constexpr bool IsValid() const { return m_magic == Magic; }
|
||||||
|
|
||||||
constexpr void GetName(char *dst, size_t size) const {
|
constexpr void GetName(char *dst, size_t size) const {
|
||||||
std::memset(dst, 0, size);
|
std::memset(dst, 0, size);
|
||||||
std::memcpy(dst, this->name, std::min(sizeof(this->name), size));
|
std::memcpy(dst, m_name, std::min(sizeof(m_name), size));
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr const u32 *GetCapabilities() const { return this->capabilities; }
|
constexpr const u32 *GetCapabilities() const { return m_capabilities; }
|
||||||
constexpr size_t GetNumCapabilities() const { return util::size(this->capabilities); }
|
constexpr size_t GetNumCapabilities() const { return util::size(m_capabilities); }
|
||||||
|
|
||||||
constexpr u64 GetProgramId() const { return this->program_id; }
|
constexpr u64 GetProgramId() const { return m_program_id; }
|
||||||
constexpr u32 GetVersion() const { return this->version; }
|
constexpr u32 GetVersion() const { return m_version; }
|
||||||
constexpr u8 GetPriority() const { return this->priority; }
|
constexpr u8 GetPriority() const { return m_priority; }
|
||||||
constexpr u8 GetIdealCoreId() const { return this->ideal_core_id; }
|
constexpr u8 GetIdealCoreId() const { return m_ideal_core_id; }
|
||||||
|
|
||||||
constexpr bool IsRxCompressed() const { return (this->flags & (1 << 0)); }
|
constexpr bool IsRxCompressed() const { return (m_flags & (1 << 0)); }
|
||||||
constexpr bool IsRoCompressed() const { return (this->flags & (1 << 1)); }
|
constexpr bool IsRoCompressed() const { return (m_flags & (1 << 1)); }
|
||||||
constexpr bool IsRwCompressed() const { return (this->flags & (1 << 2)); }
|
constexpr bool IsRwCompressed() const { return (m_flags & (1 << 2)); }
|
||||||
constexpr bool Is64Bit() const { return (this->flags & (1 << 3)); }
|
constexpr bool Is64Bit() const { return (m_flags & (1 << 3)); }
|
||||||
constexpr bool Is64BitAddressSpace() const { return (this->flags & (1 << 4)); }
|
constexpr bool Is64BitAddressSpace() const { return (m_flags & (1 << 4)); }
|
||||||
constexpr bool UsesSecureMemory() const { return (this->flags & (1 << 5)); }
|
constexpr bool UsesSecureMemory() const { return (m_flags & (1 << 5)); }
|
||||||
|
|
||||||
constexpr u32 GetRxAddress() const { return this->rx_address; }
|
constexpr u32 GetRxAddress() const { return m_rx_address; }
|
||||||
constexpr u32 GetRxSize() const { return this->rx_size; }
|
constexpr u32 GetRxSize() const { return m_rx_size; }
|
||||||
constexpr u32 GetRxCompressedSize() const { return this->rx_compressed_size; }
|
constexpr u32 GetRxCompressedSize() const { return m_rx_compressed_size; }
|
||||||
constexpr u32 GetRoAddress() const { return this->ro_address; }
|
constexpr u32 GetRoAddress() const { return m_ro_address; }
|
||||||
constexpr u32 GetRoSize() const { return this->ro_size; }
|
constexpr u32 GetRoSize() const { return m_ro_size; }
|
||||||
constexpr u32 GetRoCompressedSize() const { return this->ro_compressed_size; }
|
constexpr u32 GetRoCompressedSize() const { return m_ro_compressed_size; }
|
||||||
constexpr u32 GetRwAddress() const { return this->rw_address; }
|
constexpr u32 GetRwAddress() const { return m_rw_address; }
|
||||||
constexpr u32 GetRwSize() const { return this->rw_size; }
|
constexpr u32 GetRwSize() const { return m_rw_size; }
|
||||||
constexpr u32 GetRwCompressedSize() const { return this->rw_compressed_size; }
|
constexpr u32 GetRwCompressedSize() const { return m_rw_compressed_size; }
|
||||||
constexpr u32 GetBssAddress() const { return this->bss_address; }
|
constexpr u32 GetBssAddress() const { return m_bss_address; }
|
||||||
constexpr u32 GetBssSize() const { return this->bss_size; }
|
constexpr u32 GetBssSize() const { return m_bss_size; }
|
||||||
|
|
||||||
constexpr u32 GetAffinityMask() const { return this->affinity_mask; }
|
constexpr u32 GetAffinityMask() const { return m_affinity_mask; }
|
||||||
constexpr u32 GetStackSize() const { return this->stack_size; }
|
constexpr u32 GetStackSize() const { return m_stack_size; }
|
||||||
};
|
};
|
||||||
static_assert(sizeof(KInitialProcessHeader) == 0x100);
|
static_assert(sizeof(KInitialProcessHeader) == 0x100);
|
||||||
|
|
||||||
class KInitialProcessReader {
|
class KInitialProcessReader {
|
||||||
private:
|
private:
|
||||||
KInitialProcessHeader *kip_header;
|
KInitialProcessHeader *m_kip_header;
|
||||||
public:
|
public:
|
||||||
constexpr KInitialProcessReader() : kip_header() { /* ... */ }
|
constexpr KInitialProcessReader() : m_kip_header() { /* ... */ }
|
||||||
|
|
||||||
constexpr const u32 *GetCapabilities() const { return this->kip_header->GetCapabilities(); }
|
constexpr const u32 *GetCapabilities() const { return m_kip_header->GetCapabilities(); }
|
||||||
constexpr size_t GetNumCapabilities() const { return this->kip_header->GetNumCapabilities(); }
|
constexpr size_t GetNumCapabilities() const { return m_kip_header->GetNumCapabilities(); }
|
||||||
|
|
||||||
constexpr size_t GetBinarySize() const {
|
constexpr size_t GetBinarySize() const {
|
||||||
return sizeof(*kip_header) + this->kip_header->GetRxCompressedSize() + this->kip_header->GetRoCompressedSize() + this->kip_header->GetRwCompressedSize();
|
return sizeof(*m_kip_header) + m_kip_header->GetRxCompressedSize() + m_kip_header->GetRoCompressedSize() + m_kip_header->GetRwCompressedSize();
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr size_t GetSize() const {
|
constexpr size_t GetSize() const {
|
||||||
if (const size_t bss_size = this->kip_header->GetBssSize(); bss_size != 0) {
|
if (const size_t bss_size = m_kip_header->GetBssSize(); bss_size != 0) {
|
||||||
return this->kip_header->GetBssAddress() + this->kip_header->GetBssSize();
|
return m_kip_header->GetBssAddress() + m_kip_header->GetBssSize();
|
||||||
} else {
|
} else {
|
||||||
return this->kip_header->GetRwAddress() + this->kip_header->GetRwSize();
|
return m_kip_header->GetRwAddress() + m_kip_header->GetRwSize();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr u8 GetPriority() const { return this->kip_header->GetPriority(); }
|
constexpr u8 GetPriority() const { return m_kip_header->GetPriority(); }
|
||||||
constexpr u8 GetIdealCoreId() const { return this->kip_header->GetIdealCoreId(); }
|
constexpr u8 GetIdealCoreId() const { return m_kip_header->GetIdealCoreId(); }
|
||||||
constexpr u32 GetAffinityMask() const { return this->kip_header->GetAffinityMask(); }
|
constexpr u32 GetAffinityMask() const { return m_kip_header->GetAffinityMask(); }
|
||||||
constexpr u32 GetStackSize() const { return this->kip_header->GetStackSize(); }
|
constexpr u32 GetStackSize() const { return m_kip_header->GetStackSize(); }
|
||||||
|
|
||||||
constexpr bool Is64Bit() const { return this->kip_header->Is64Bit(); }
|
constexpr bool Is64Bit() const { return m_kip_header->Is64Bit(); }
|
||||||
constexpr bool Is64BitAddressSpace() const { return this->kip_header->Is64BitAddressSpace(); }
|
constexpr bool Is64BitAddressSpace() const { return m_kip_header->Is64BitAddressSpace(); }
|
||||||
constexpr bool UsesSecureMemory() const { return this->kip_header->UsesSecureMemory(); }
|
constexpr bool UsesSecureMemory() const { return m_kip_header->UsesSecureMemory(); }
|
||||||
|
|
||||||
bool Attach(u8 *bin) {
|
bool Attach(u8 *bin) {
|
||||||
if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) {
|
if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) {
|
||||||
this->kip_header = header;
|
m_kip_header = header;
|
||||||
return true;
|
return true;
|
||||||
} else {
|
} else {
|
||||||
return false;
|
return false;
|
||||||
|
|
|
@ -27,10 +27,10 @@ namespace ams::kern {
|
||||||
class KInterruptEvent final : public KAutoObjectWithSlabHeapAndContainer<KInterruptEvent, KReadableEvent> {
|
class KInterruptEvent final : public KAutoObjectWithSlabHeapAndContainer<KInterruptEvent, KReadableEvent> {
|
||||||
MESOSPHERE_AUTOOBJECT_TRAITS(KInterruptEvent, KReadableEvent);
|
MESOSPHERE_AUTOOBJECT_TRAITS(KInterruptEvent, KReadableEvent);
|
||||||
private:
|
private:
|
||||||
s32 interrupt_id;
|
s32 m_interrupt_id;
|
||||||
bool is_initialized;
|
bool m_is_initialized;
|
||||||
public:
|
public:
|
||||||
constexpr KInterruptEvent() : interrupt_id(-1), is_initialized(false) { /* ... */ }
|
constexpr KInterruptEvent() : m_interrupt_id(-1), m_is_initialized(false) { /* ... */ }
|
||||||
virtual ~KInterruptEvent() { /* ... */ }
|
virtual ~KInterruptEvent() { /* ... */ }
|
||||||
|
|
||||||
Result Initialize(int32_t interrupt_name, ams::svc::InterruptType type);
|
Result Initialize(int32_t interrupt_name, ams::svc::InterruptType type);
|
||||||
|
@ -38,22 +38,22 @@ namespace ams::kern {
|
||||||
|
|
||||||
virtual Result Reset() override;
|
virtual Result Reset() override;
|
||||||
|
|
||||||
virtual bool IsInitialized() const override { return this->is_initialized; }
|
virtual bool IsInitialized() const override { return m_is_initialized; }
|
||||||
|
|
||||||
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
|
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
|
||||||
|
|
||||||
constexpr s32 GetInterruptId() const { return this->interrupt_id; }
|
constexpr s32 GetInterruptId() const { return m_interrupt_id; }
|
||||||
};
|
};
|
||||||
|
|
||||||
class KInterruptEventTask : public KSlabAllocated<KInterruptEventTask>, public KInterruptTask {
|
class KInterruptEventTask : public KSlabAllocated<KInterruptEventTask>, public KInterruptTask {
|
||||||
private:
|
private:
|
||||||
KInterruptEvent *event;
|
KInterruptEvent *m_event;
|
||||||
KLightLock lock;
|
KLightLock m_lock;
|
||||||
public:
|
public:
|
||||||
constexpr KInterruptEventTask() : event(nullptr), lock() { /* ... */ }
|
constexpr KInterruptEventTask() : m_event(nullptr), m_lock() { /* ... */ }
|
||||||
~KInterruptEventTask() { /* ... */ }
|
~KInterruptEventTask() { /* ... */ }
|
||||||
|
|
||||||
KLightLock &GetLock() { return this->lock; }
|
KLightLock &GetLock() { return m_lock; }
|
||||||
|
|
||||||
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override;
|
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override;
|
||||||
virtual void DoTask() override;
|
virtual void DoTask() override;
|
||||||
|
|
|
@ -26,16 +26,16 @@ namespace ams::kern {
|
||||||
|
|
||||||
class KInterruptTask : public KInterruptHandler {
|
class KInterruptTask : public KInterruptHandler {
|
||||||
private:
|
private:
|
||||||
KInterruptTask *next_task;
|
KInterruptTask *m_next_task;
|
||||||
public:
|
public:
|
||||||
constexpr ALWAYS_INLINE KInterruptTask() : next_task(nullptr) { /* ... */ }
|
constexpr ALWAYS_INLINE KInterruptTask() : m_next_task(nullptr) { /* ... */ }
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE KInterruptTask *GetNextTask() const {
|
constexpr ALWAYS_INLINE KInterruptTask *GetNextTask() const {
|
||||||
return this->next_task;
|
return m_next_task;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE void SetNextTask(KInterruptTask *t) {
|
constexpr ALWAYS_INLINE void SetNextTask(KInterruptTask *t) {
|
||||||
this->next_task = t;
|
m_next_task = t;
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void DoTask() = 0;
|
virtual void DoTask() = 0;
|
||||||
|
|
|
@ -24,28 +24,28 @@ namespace ams::kern {
|
||||||
private:
|
private:
|
||||||
class TaskQueue {
|
class TaskQueue {
|
||||||
private:
|
private:
|
||||||
KInterruptTask *head;
|
KInterruptTask *m_head;
|
||||||
KInterruptTask *tail;
|
KInterruptTask *m_tail;
|
||||||
public:
|
public:
|
||||||
constexpr TaskQueue() : head(nullptr), tail(nullptr) { /* ... */ }
|
constexpr TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ }
|
||||||
|
|
||||||
constexpr KInterruptTask *GetHead() { return this->head; }
|
constexpr KInterruptTask *GetHead() { return m_head; }
|
||||||
constexpr bool IsEmpty() const { return this->head == nullptr; }
|
constexpr bool IsEmpty() const { return m_head == nullptr; }
|
||||||
constexpr void Clear() { this->head = nullptr; this->tail = nullptr; }
|
constexpr void Clear() { m_head = nullptr; m_tail = nullptr; }
|
||||||
|
|
||||||
void Enqueue(KInterruptTask *task);
|
void Enqueue(KInterruptTask *task);
|
||||||
void Dequeue();
|
void Dequeue();
|
||||||
};
|
};
|
||||||
private:
|
private:
|
||||||
TaskQueue task_queue;
|
TaskQueue m_task_queue;
|
||||||
KThread *thread;
|
KThread *m_thread;
|
||||||
private:
|
private:
|
||||||
static void ThreadFunction(uintptr_t arg);
|
static void ThreadFunction(uintptr_t arg);
|
||||||
void ThreadFunctionImpl();
|
void ThreadFunctionImpl();
|
||||||
public:
|
public:
|
||||||
constexpr KInterruptTaskManager() : task_queue(), thread(nullptr) { /* ... */ }
|
constexpr KInterruptTaskManager() : m_task_queue(), m_thread(nullptr) { /* ... */ }
|
||||||
|
|
||||||
constexpr KThread *GetThread() const { return this->thread; }
|
constexpr KThread *GetThread() const { return m_thread; }
|
||||||
|
|
||||||
NOINLINE void Initialize();
|
NOINLINE void Initialize();
|
||||||
void EnqueueTask(KInterruptTask *task);
|
void EnqueueTask(KInterruptTask *task);
|
||||||
|
|
|
@ -24,20 +24,20 @@ namespace ams::kern {
|
||||||
class KLightClientSession final : public KAutoObjectWithSlabHeapAndContainer<KLightClientSession, KAutoObjectWithList> {
|
class KLightClientSession final : public KAutoObjectWithSlabHeapAndContainer<KLightClientSession, KAutoObjectWithList> {
|
||||||
MESOSPHERE_AUTOOBJECT_TRAITS(KLightClientSession, KAutoObject);
|
MESOSPHERE_AUTOOBJECT_TRAITS(KLightClientSession, KAutoObject);
|
||||||
private:
|
private:
|
||||||
KLightSession *parent;
|
KLightSession *m_parent;
|
||||||
public:
|
public:
|
||||||
constexpr KLightClientSession() : parent() { /* ... */ }
|
constexpr KLightClientSession() : m_parent() { /* ... */ }
|
||||||
virtual ~KLightClientSession() { /* ... */ }
|
virtual ~KLightClientSession() { /* ... */ }
|
||||||
|
|
||||||
void Initialize(KLightSession *parent) {
|
void Initialize(KLightSession *parent) {
|
||||||
/* Set member variables. */
|
/* Set member variables. */
|
||||||
this->parent = parent;
|
m_parent = parent;
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void Destroy() override;
|
virtual void Destroy() override;
|
||||||
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
|
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
|
||||||
|
|
||||||
constexpr const KLightSession *GetParent() const { return this->parent; }
|
constexpr const KLightSession *GetParent() const { return m_parent; }
|
||||||
|
|
||||||
Result SendSyncRequest(u32 *data);
|
Result SendSyncRequest(u32 *data);
|
||||||
|
|
||||||
|
|
|
@ -24,9 +24,9 @@ namespace ams::kern {
|
||||||
|
|
||||||
class KLightConditionVariable {
|
class KLightConditionVariable {
|
||||||
private:
|
private:
|
||||||
KThreadQueue thread_queue;
|
KThreadQueue m_thread_queue;
|
||||||
public:
|
public:
|
||||||
constexpr ALWAYS_INLINE KLightConditionVariable() : thread_queue() { /* ... */ }
|
constexpr ALWAYS_INLINE KLightConditionVariable() : m_thread_queue() { /* ... */ }
|
||||||
private:
|
private:
|
||||||
void WaitImpl(KLightLock *lock, s64 timeout) {
|
void WaitImpl(KLightLock *lock, s64 timeout) {
|
||||||
KThread *owner = GetCurrentThreadPointer();
|
KThread *owner = GetCurrentThreadPointer();
|
||||||
|
@ -37,7 +37,7 @@ namespace ams::kern {
|
||||||
KScopedSchedulerLockAndSleep lk(&timer, owner, timeout);
|
KScopedSchedulerLockAndSleep lk(&timer, owner, timeout);
|
||||||
lock->Unlock();
|
lock->Unlock();
|
||||||
|
|
||||||
if (!this->thread_queue.SleepThread(owner)) {
|
if (!m_thread_queue.SleepThread(owner)) {
|
||||||
lk.CancelSleep();
|
lk.CancelSleep();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -56,7 +56,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
void Broadcast() {
|
void Broadcast() {
|
||||||
KScopedSchedulerLock lk;
|
KScopedSchedulerLock lk;
|
||||||
while (this->thread_queue.WakeupFrontThread() != nullptr) {
|
while (m_thread_queue.WakeupFrontThread() != nullptr) {
|
||||||
/* We want to signal all threads, and so should continue waking up until there's nothing to wake. */
|
/* We want to signal all threads, and so should continue waking up until there's nothing to wake. */
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,9 +23,9 @@ namespace ams::kern {
|
||||||
|
|
||||||
class KLightLock {
|
class KLightLock {
|
||||||
private:
|
private:
|
||||||
std::atomic<uintptr_t> tag;
|
std::atomic<uintptr_t> m_tag;
|
||||||
public:
|
public:
|
||||||
constexpr KLightLock() : tag(0) { /* ... */ }
|
constexpr KLightLock() : m_tag(0) { /* ... */ }
|
||||||
|
|
||||||
void Lock() {
|
void Lock() {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
|
@ -34,9 +34,9 @@ namespace ams::kern {
|
||||||
const uintptr_t cur_thread_tag = (cur_thread | 1);
|
const uintptr_t cur_thread_tag = (cur_thread | 1);
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
uintptr_t old_tag = this->tag.load(std::memory_order_relaxed);
|
uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);
|
||||||
|
|
||||||
while (!this->tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) {
|
while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) {
|
||||||
if ((old_tag | 1) == cur_thread_tag) {
|
if ((old_tag | 1) == cur_thread_tag) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -59,14 +59,14 @@ namespace ams::kern {
|
||||||
if (expected != cur_thread) {
|
if (expected != cur_thread) {
|
||||||
return this->UnlockSlowPath(cur_thread);
|
return this->UnlockSlowPath(cur_thread);
|
||||||
}
|
}
|
||||||
} while (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release));
|
} while (!m_tag.compare_exchange_weak(expected, 0, std::memory_order_release));
|
||||||
}
|
}
|
||||||
|
|
||||||
void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
|
void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
|
||||||
void UnlockSlowPath(uintptr_t cur_thread);
|
void UnlockSlowPath(uintptr_t cur_thread);
|
||||||
|
|
||||||
bool IsLocked() const { return this->tag != 0; }
|
bool IsLocked() const { return m_tag != 0; }
|
||||||
bool IsLockedByCurrentThread() const { return (this->tag | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
|
bool IsLockedByCurrentThread() const { return (m_tag | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
|
||||||
};
|
};
|
||||||
|
|
||||||
using KScopedLightLock = KScopedLock<KLightLock>;
|
using KScopedLightLock = KScopedLock<KLightLock>;
|
||||||
|
|
|
@@ -26,24 +26,24 @@ namespace ams::kern {
     class KLightServerSession final : public KAutoObjectWithSlabHeapAndContainer<KLightServerSession, KAutoObjectWithList>, public util::IntrusiveListBaseNode<KLightServerSession> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KLightServerSession, KAutoObject);
         private:
-            KLightSession *parent;
-            KThreadQueue request_queue;
-            KThreadQueue server_queue;
-            KThread *current_request;
-            KThread *server_thread;
+            KLightSession *m_parent;
+            KThreadQueue m_request_queue;
+            KThreadQueue m_server_queue;
+            KThread *m_current_request;
+            KThread *m_server_thread;
         public:
-            constexpr KLightServerSession() : parent(), request_queue(), server_queue(), current_request(), server_thread() { /* ... */ }
+            constexpr KLightServerSession() : m_parent(), m_request_queue(), m_server_queue(), m_current_request(), m_server_thread() { /* ... */ }
             virtual ~KLightServerSession() { /* ... */ }
 
             void Initialize(KLightSession *parent) {
                 /* Set member variables. */
-                this->parent = parent;
+                m_parent = parent;
             }
 
             virtual void Destroy() override;
             static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
 
-            constexpr const KLightSession *GetParent() const { return this->parent; }
+            constexpr const KLightSession *GetParent() const { return m_parent; }
 
             Result OnRequest(KThread *request_thread);
             Result ReplyAndReceive(u32 *data);

@@ -38,16 +38,16 @@ namespace ams::kern {
             static constexpr size_t DataSize  = sizeof(u32) * 7;
             static constexpr u32 ReplyFlag    = (1u << (BITSIZEOF(u32) - 1));
         private:
-            KLightServerSession server;
-            KLightClientSession client;
-            State state;
-            KClientPort *port;
-            uintptr_t name;
-            KProcess *process;
-            bool initialized;
+            KLightServerSession m_server;
+            KLightClientSession m_client;
+            State m_state;
+            KClientPort *m_port;
+            uintptr_t m_name;
+            KProcess *m_process;
+            bool m_initialized;
         public:
             constexpr KLightSession()
-                : server(), client(), state(State::Invalid), port(), name(), process(), initialized()
+                : m_server(), m_client(), m_state(State::Invalid), m_port(), m_name(), m_process(), m_initialized()
             {
                 /* ... */
             }

@@ -57,23 +57,23 @@ namespace ams::kern {
             void Initialize(KClientPort *client_port, uintptr_t name);
             virtual void Finalize() override;
 
-            virtual bool IsInitialized() const override { return this->initialized; }
-            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->process); }
+            virtual bool IsInitialized() const override { return m_initialized; }
+            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_process); }
 
             static void PostDestroy(uintptr_t arg);
 
             void OnServerClosed();
             void OnClientClosed();
 
-            bool IsServerClosed() const { return this->state != State::Normal; }
-            bool IsClientClosed() const { return this->state != State::Normal; }
+            bool IsServerClosed() const { return m_state != State::Normal; }
+            bool IsClientClosed() const { return m_state != State::Normal; }
 
-            Result OnRequest(KThread *request_thread) { return this->server.OnRequest(request_thread); }
+            Result OnRequest(KThread *request_thread) { return m_server.OnRequest(request_thread); }
 
-            KLightClientSession &GetClientSession() { return this->client; }
-            KLightServerSession &GetServerSession() { return this->server; }
-            const KLightClientSession &GetClientSession() const { return this->client; }
-            const KLightServerSession &GetServerSession() const { return this->server; }
+            KLightClientSession &GetClientSession() { return m_client; }
+            KLightServerSession &GetServerSession() { return m_server; }
+            const KLightClientSession &GetClientSession() const { return m_client; }
+            const KLightServerSession &GetServerSession() const { return m_server; }
     };
 
 }
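KLightSession packages both endpoints of a light IPC channel as plain members rather than separate allocations, and OnRequest simply forwards to the embedded server half. A rough sketch of that layout, with placeholder endpoint types standing in for the kernel's:

    #include <cstdint>

    struct ServerEndpoint { int OnRequest(void *) { /* queue the request */ return 0; } };
    struct ClientEndpoint { /* ... */ };

    class Session {
        private:
            enum class State : uint8_t { Invalid, Normal, ClientClosed, ServerClosed };

            /* Both halves live inside the session, so one allocation covers the pair. */
            ServerEndpoint m_server;
            ClientEndpoint m_client;
            State m_state{State::Invalid};
        public:
            bool IsClosed() const { return m_state != State::Normal; }

            /* Requests enter through the session and are routed to the server half. */
            int OnRequest(void *thread) { return m_server.OnRequest(thread); }

            ServerEndpoint &GetServerSession() { return m_server; }
            ClientEndpoint &GetClientSession() { return m_client; }
    };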
@@ -22,17 +22,17 @@ namespace ams::kern {
 
     class KLinkedListNode : public util::IntrusiveListBaseNode<KLinkedListNode>, public KSlabAllocated<KLinkedListNode> {
         private:
-            void *item;
+            void *m_item;
         public:
-            constexpr KLinkedListNode() : util::IntrusiveListBaseNode<KLinkedListNode>(), item(nullptr) { MESOSPHERE_ASSERT_THIS(); }
+            constexpr KLinkedListNode() : util::IntrusiveListBaseNode<KLinkedListNode>(), m_item(nullptr) { MESOSPHERE_ASSERT_THIS(); }
 
             constexpr void Initialize(void *it) {
                 MESOSPHERE_ASSERT_THIS();
-                this->item = it;
+                m_item = it;
             }
 
             constexpr void *GetItem() const {
-                return this->item;
+                return m_item;
             }
     };
     static_assert(sizeof(KLinkedListNode) == sizeof(util::IntrusiveListNode) + sizeof(void *));
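The node pairs an intrusive list hook with a single type-erased `void *` payload, and the `static_assert` pins the layout to exactly hook plus pointer. A compilable sketch of the same idea, with an illustrative hook type standing in for the kernel's intrusive list node:

    #include <cstddef>

    /* Stand-in for an intrusive list hook: two sibling pointers. */
    struct ListHook { ListHook *prev = nullptr, *next = nullptr; };

    class Node : public ListHook {
        private:
            void *m_item = nullptr; /* type-erased payload */
        public:
            constexpr void Initialize(void *it) { m_item = it; }
            constexpr void *GetItem() const { return m_item; }
    };

    /* Pin the layout: a node must cost exactly one hook plus one pointer,
       so slab-allocated nodes stay as small as possible. */
    static_assert(sizeof(Node) == sizeof(ListHook) + sizeof(void *));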
@@ -69,16 +69,16 @@ namespace ams::kern {
                     using pointer   = typename std::conditional<Const, KLinkedList::const_pointer, KLinkedList::pointer>::type;
                     using reference = typename std::conditional<Const, KLinkedList::const_reference, KLinkedList::reference>::type;
                 private:
-                    BaseIterator base_it;
+                    BaseIterator m_base_it;
                 public:
-                    explicit Iterator(BaseIterator it) : base_it(it) { /* ... */ }
+                    explicit Iterator(BaseIterator it) : m_base_it(it) { /* ... */ }
 
                     pointer GetItem() const {
-                        return static_cast<pointer>(this->base_it->GetItem());
+                        return static_cast<pointer>(m_base_it->GetItem());
                     }
 
                     bool operator==(const Iterator &rhs) const {
-                        return this->base_it == rhs.base_it;
+                        return m_base_it == rhs.m_base_it;
                     }
 
                     bool operator!=(const Iterator &rhs) const {

@@ -94,12 +94,12 @@ namespace ams::kern {
                     }
 
                     Iterator &operator++() {
-                        ++this->base_it;
+                        ++m_base_it;
                         return *this;
                     }
 
                     Iterator &operator--() {
-                        --this->base_it;
+                        --m_base_it;
                         return *this;
                     }
 

@@ -116,7 +116,7 @@ namespace ams::kern {
                     }
 
                     operator Iterator<true>() const {
-                        return Iterator<true>(this->base_it);
+                        return Iterator<true>(m_base_it);
                    }
            };
        public:

@@ -205,7 +205,7 @@ namespace ams::kern {
                 KLinkedListNode *node = KLinkedListNode::Allocate();
                 MESOSPHERE_ABORT_UNLESS(node != nullptr);
                 node->Initialize(std::addressof(ref));
-                return iterator(BaseList::insert(pos.base_it, *node));
+                return iterator(BaseList::insert(pos.m_base_it, *node));
             }
 
             void push_back(reference ref) {

@@ -225,8 +225,8 @@ namespace ams::kern {
             }
 
             iterator erase(const iterator pos) {
-                KLinkedListNode *freed_node = std::addressof(*pos.base_it);
-                iterator ret = iterator(BaseList::erase(pos.base_it));
+                KLinkedListNode *freed_node = std::addressof(*pos.m_base_it);
+                iterator ret = iterator(BaseList::erase(pos.m_base_it));
                 KLinkedListNode::Free(freed_node);
 
                 return ret;
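insert allocates a slab node, points it at the caller's object, and splices it into the underlying intrusive list; erase reverses the dance, unlinking before freeing. A self-contained sketch of that allocate-link/unlink-free pattern, purely illustrative (the kernel uses its own slab allocator and an intrusive list rather than std::list):

    #include <cassert>
    #include <list>

    struct Node {
        void *item = nullptr;
    };

    /* Stand-ins for the kernel's slab allocator. */
    Node *AllocateNode() { return new Node(); }
    void FreeNode(Node *n) { delete n; }

    class LinkedList {
        private:
            std::list<Node *> m_base; /* the kernel uses an intrusive list here */
        public:
            using iterator = std::list<Node *>::iterator;

            iterator insert(iterator pos, void *ref) {
                Node *node = AllocateNode();        /* allocate... */
                assert(node != nullptr);
                node->item = ref;                   /* ...point it at the object... */
                return m_base.insert(pos, node);    /* ...and splice it in. */
            }

            iterator erase(iterator pos) {
                Node *freed = *pos;                 /* remember before unlinking */
                iterator ret = m_base.erase(pos);   /* unlink first... */
                FreeNode(freed);                    /* ...then free. */
                return ret;
            }

            iterator end() { return m_base.end(); }
    };

    int main() {
        LinkedList list;
        int value = 42;
        auto it = list.insert(list.end(), &value);
        list.erase(it);
    }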
@@ -189,38 +189,38 @@ namespace ams::kern {
     };
 
     struct KMemoryInfo {
-        uintptr_t address;
-        size_t size;
-        KMemoryState state;
-        u16 device_disable_merge_left_count;
-        u16 device_disable_merge_right_count;
-        u16 ipc_lock_count;
-        u16 device_use_count;
-        u16 ipc_disable_merge_count;
-        KMemoryPermission perm;
-        KMemoryAttribute attribute;
-        KMemoryPermission original_perm;
-        KMemoryBlockDisableMergeAttribute disable_merge_attribute;
+        uintptr_t m_address;
+        size_t m_size;
+        KMemoryState m_state;
+        u16 m_device_disable_merge_left_count;
+        u16 m_device_disable_merge_right_count;
+        u16 m_ipc_lock_count;
+        u16 m_device_use_count;
+        u16 m_ipc_disable_merge_count;
+        KMemoryPermission m_perm;
+        KMemoryAttribute m_attribute;
+        KMemoryPermission m_original_perm;
+        KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
 
         constexpr ams::svc::MemoryInfo GetSvcMemoryInfo() const {
             return {
-                .addr            = this->address,
-                .size            = this->size,
-                .state           = static_cast<ams::svc::MemoryState>(this->state & KMemoryState_Mask),
-                .attr            = static_cast<ams::svc::MemoryAttribute>(this->attribute & KMemoryAttribute_UserMask),
-                .perm            = static_cast<ams::svc::MemoryPermission>(this->perm & KMemoryPermission_UserMask),
-                .ipc_refcount    = this->ipc_lock_count,
-                .device_refcount = this->device_use_count,
+                .addr            = m_address,
+                .size            = m_size,
+                .state           = static_cast<ams::svc::MemoryState>(m_state & KMemoryState_Mask),
+                .attr            = static_cast<ams::svc::MemoryAttribute>(m_attribute & KMemoryAttribute_UserMask),
+                .perm            = static_cast<ams::svc::MemoryPermission>(m_perm & KMemoryPermission_UserMask),
+                .ipc_refcount    = m_ipc_lock_count,
+                .device_refcount = m_device_use_count,
                 .padding         = {},
             };
         }
 
         constexpr uintptr_t GetAddress() const {
-            return this->address;
+            return m_address;
         }
 
         constexpr size_t GetSize() const {
-            return this->size;
+            return m_size;
         }
 
         constexpr size_t GetNumPages() const {
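GetSvcMemoryInfo is the trust boundary: kernel-internal state, attribute, and permission words are masked down to their user-visible bits before being handed across the SVC interface. A minimal sketch of that masking pattern, with mask and field values invented for illustration:

    #include <cstdint>

    /* Illustrative masks: low bits are user-visible, high bits kernel-internal. */
    constexpr uint32_t StateMask    = 0xFF;
    constexpr uint32_t UserAttrMask = 0x0F;
    constexpr uint32_t UserPermMask = 0x07;

    struct KernelInfo {
        uintptr_t address;
        size_t    size;
        uint32_t  state;      /* may carry kernel-only flag bits */
        uint32_t  attribute;  /* may carry kernel-only lock bits */
        uint32_t  permission;
    };

    struct UserInfo {
        uintptr_t addr;
        size_t    size;
        uint32_t  state;
        uint32_t  attr;
        uint32_t  perm;
    };

    /* Only masked-down values ever cross to userland. */
    constexpr UserInfo ToUser(const KernelInfo &k) {
        return {
            .addr  = k.address,
            .size  = k.size,
            .state = k.state & StateMask,
            .attr  = k.attribute & UserAttrMask,
            .perm  = k.permission & UserPermMask,
        };
    }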
@@ -236,48 +236,48 @@ namespace ams::kern {
         }
 
         constexpr u16 GetIpcLockCount() const {
-            return this->ipc_lock_count;
+            return m_ipc_lock_count;
         }
 
         constexpr u16 GetIpcDisableMergeCount() const {
-            return this->ipc_disable_merge_count;
+            return m_ipc_disable_merge_count;
         }
 
         constexpr KMemoryState GetState() const {
-            return this->state;
+            return m_state;
         }
 
         constexpr KMemoryPermission GetPermission() const {
-            return this->perm;
+            return m_perm;
         }
 
         constexpr KMemoryPermission GetOriginalPermission() const {
-            return this->original_perm;
+            return m_original_perm;
         }
 
         constexpr KMemoryAttribute GetAttribute() const {
-            return this->attribute;
+            return m_attribute;
         }
 
         constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
-            return this->disable_merge_attribute;
+            return m_disable_merge_attribute;
        }
    };
 
     class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
         private:
-            u16 device_disable_merge_left_count;
-            u16 device_disable_merge_right_count;
-            KProcessAddress address;
-            size_t num_pages;
-            KMemoryState memory_state;
-            u16 ipc_lock_count;
-            u16 device_use_count;
-            u16 ipc_disable_merge_count;
-            KMemoryPermission perm;
-            KMemoryPermission original_perm;
-            KMemoryAttribute attribute;
-            KMemoryBlockDisableMergeAttribute disable_merge_attribute;
+            u16 m_device_disable_merge_left_count;
+            u16 m_device_disable_merge_right_count;
+            KProcessAddress m_address;
+            size_t m_num_pages;
+            KMemoryState m_memory_state;
+            u16 m_ipc_lock_count;
+            u16 m_device_use_count;
+            u16 m_ipc_disable_merge_count;
+            KMemoryPermission m_perm;
+            KMemoryPermission m_original_perm;
+            KMemoryAttribute m_attribute;
+            KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
         public:
             static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
                 if (lhs.GetAddress() < rhs.GetAddress()) {
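KMemoryBlock hangs off an intrusive red-black tree node and supplies a static three-way Compare keyed on the block's start address, which is what lets the block manager find the block covering any address in logarithmic time. A sketch of the comparator contract, assuming a standard ordered container in place of the kernel's intrusive tree:

    #include <cstdint>
    #include <set>

    struct Block {
        uintptr_t address;
        size_t    num_pages;

        /* Three-way comparison on start address: negative, zero, or positive. */
        static constexpr int Compare(const Block &lhs, const Block &rhs) {
            if (lhs.address < rhs.address) {
                return -1;
            } else if (lhs.address > rhs.address) {
                return 1;
            } else {
                return 0;
            }
        }
    };

    /* Adapter so a standard ordered container can reuse the same comparator. */
    struct BlockLess {
        bool operator()(const Block &lhs, const Block &rhs) const {
            return Block::Compare(lhs, rhs) < 0;
        }
    };

    using BlockTree = std::set<Block, BlockLess>; /* stand-in for the intrusive tree */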
@@ -290,11 +290,11 @@ namespace ams::kern {
            }
        public:
             constexpr KProcessAddress GetAddress() const {
-                return this->address;
+                return m_address;
             }
 
             constexpr size_t GetNumPages() const {
-                return this->num_pages;
+                return m_num_pages;
             }
 
             constexpr size_t GetSize() const {

@@ -310,87 +310,87 @@ namespace ams::kern {
             }
 
             constexpr u16 GetIpcLockCount() const {
-                return this->ipc_lock_count;
+                return m_ipc_lock_count;
             }
 
             constexpr u16 GetIpcDisableMergeCount() const {
-                return this->ipc_disable_merge_count;
+                return m_ipc_disable_merge_count;
             }
 
             constexpr KMemoryPermission GetPermission() const {
-                return this->perm;
+                return m_perm;
             }
 
             constexpr KMemoryPermission GetOriginalPermission() const {
-                return this->original_perm;
+                return m_original_perm;
             }
 
             constexpr KMemoryAttribute GetAttribute() const {
-                return this->attribute;
+                return m_attribute;
             }
 
             constexpr KMemoryInfo GetMemoryInfo() const {
                 return {
-                    .address                          = GetInteger(this->GetAddress()),
-                    .size                             = this->GetSize(),
-                    .state                            = this->memory_state,
-                    .device_disable_merge_left_count  = this->device_disable_merge_left_count,
-                    .device_disable_merge_right_count = this->device_disable_merge_right_count,
-                    .ipc_lock_count                   = this->ipc_lock_count,
-                    .device_use_count                 = this->device_use_count,
-                    .ipc_disable_merge_count          = this->ipc_disable_merge_count,
-                    .perm                             = this->perm,
-                    .attribute                        = this->attribute,
-                    .original_perm                    = this->original_perm,
-                    .disable_merge_attribute          = this->disable_merge_attribute,
+                    .m_address                          = GetInteger(this->GetAddress()),
+                    .m_size                             = this->GetSize(),
+                    .m_state                            = m_memory_state,
+                    .m_device_disable_merge_left_count  = m_device_disable_merge_left_count,
+                    .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
+                    .m_ipc_lock_count                   = m_ipc_lock_count,
+                    .m_device_use_count                 = m_device_use_count,
+                    .m_ipc_disable_merge_count          = m_ipc_disable_merge_count,
+                    .m_perm                             = m_perm,
+                    .m_attribute                        = m_attribute,
+                    .m_original_perm                    = m_original_perm,
+                    .m_disable_merge_attribute          = m_disable_merge_attribute,
                 };
             }
         public:
             constexpr KMemoryBlock()
-                : device_disable_merge_left_count(), device_disable_merge_right_count(), address(), num_pages(), memory_state(KMemoryState_None), ipc_lock_count(), device_use_count(), ipc_disable_merge_count(), perm(), original_perm(), attribute(), disable_merge_attribute()
+                : m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), m_address(), m_num_pages(), m_memory_state(KMemoryState_None), m_ipc_lock_count(), m_device_use_count(), m_ipc_disable_merge_count(), m_perm(), m_original_perm(), m_attribute(), m_disable_merge_attribute()
             {
                 /* ... */
             }
 
             constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
-                : device_disable_merge_left_count(), device_disable_merge_right_count(), address(addr), num_pages(np), memory_state(ms), ipc_lock_count(0), device_use_count(0), ipc_disable_merge_count(), perm(p), original_perm(KMemoryPermission_None), attribute(attr), disable_merge_attribute()
+                : m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), m_device_use_count(0), m_ipc_disable_merge_count(), m_perm(p), m_original_perm(KMemoryPermission_None), m_attribute(attr), m_disable_merge_attribute()
             {
                 /* ... */
             }
 
             constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) {
                 MESOSPHERE_ASSERT_THIS();
-                this->address = addr;
-                this->num_pages = np;
-                this->memory_state = ms;
-                this->ipc_lock_count = 0;
-                this->device_use_count = 0;
-                this->perm = p;
-                this->original_perm = KMemoryPermission_None;
-                this->attribute = attr;
+                m_address = addr;
+                m_num_pages = np;
+                m_memory_state = ms;
+                m_ipc_lock_count = 0;
+                m_device_use_count = 0;
+                m_perm = p;
+                m_original_perm = KMemoryPermission_None;
+                m_attribute = attr;
             }
 
             constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
                 MESOSPHERE_ASSERT_THIS();
                 constexpr auto AttributeIgnoreMask = KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
-                return this->memory_state == s && this->perm == p && (this->attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
+                return m_memory_state == s && m_perm == p && (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
             }
 
             constexpr bool HasSameProperties(const KMemoryBlock &rhs) const {
                 MESOSPHERE_ASSERT_THIS();
 
-                return this->memory_state == rhs.memory_state &&
-                       this->perm == rhs.perm &&
-                       this->original_perm == rhs.original_perm &&
-                       this->attribute == rhs.attribute &&
-                       this->ipc_lock_count == rhs.ipc_lock_count &&
-                       this->device_use_count == rhs.device_use_count;
+                return m_memory_state == rhs.m_memory_state &&
+                       m_perm == rhs.m_perm &&
+                       m_original_perm == rhs.m_original_perm &&
+                       m_attribute == rhs.m_attribute &&
+                       m_ipc_lock_count == rhs.m_ipc_lock_count &&
+                       m_device_use_count == rhs.m_device_use_count;
             }
 
             constexpr bool CanMergeWith(const KMemoryBlock &rhs) const {
                 return this->HasSameProperties(rhs) &&
-                       (this->disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight) == 0 &&
-                       (rhs.disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft) == 0;
+                       (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight) == 0 &&
+                       (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft) == 0;
             }
 
             constexpr bool Contains(KProcessAddress addr) const {
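CanMergeWith shows why the bookkeeping above exists: two adjacent blocks coalesce only when every tracked property matches and neither side has a disable-merge bit facing the join. Note also that HasProperties deliberately ignores IpcLocked and DeviceShared, since those are transient sharing states rather than part of the block's identity. A compact sketch of that attribute-masked equality check, with flag values invented for illustration:

    #include <cstdint>

    enum Attribute : uint32_t {
        Attr_None         = 0,
        Attr_IpcLocked    = (1u << 1), /* transient: ignored when comparing identity */
        Attr_DeviceShared = (1u << 2), /* transient: ignored when comparing identity */
        Attr_Uncached     = (1u << 3),
    };

    struct Block {
        uint32_t state;
        uint32_t perm;
        uint32_t attribute;

        constexpr bool HasProperties(uint32_t s, uint32_t p, uint32_t a) const {
            /* Or-ing the ignore mask into both sides makes those bits compare equal. */
            constexpr uint32_t IgnoreMask = Attr_IpcLocked | Attr_DeviceShared;
            return state == s && perm == p &&
                   (attribute | IgnoreMask) == (a | IgnoreMask);
        }
    };

    static_assert(Block{1, 2, Attr_IpcLocked}.HasProperties(1, 2, Attr_None));
    static_assert(!Block{1, 2, Attr_Uncached}.HasProperties(1, 2, Attr_None));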
@@ -404,25 +404,25 @@ namespace ams::kern {
                 MESOSPHERE_ASSERT(added_block.GetNumPages() > 0);
                 MESOSPHERE_ASSERT(this->GetAddress() + added_block.GetSize() - 1 < this->GetEndAddress() + added_block.GetSize() - 1);
 
-                this->num_pages += added_block.GetNumPages();
-                this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute | added_block.disable_merge_attribute);
-                this->device_disable_merge_right_count = added_block.device_disable_merge_right_count;
+                m_num_pages += added_block.GetNumPages();
+                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | added_block.m_disable_merge_attribute);
+                m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
             }
 
             constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a, bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
                 MESOSPHERE_ASSERT_THIS();
-                MESOSPHERE_ASSERT(this->original_perm == KMemoryPermission_None);
-                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == 0);
+                MESOSPHERE_ASSERT(m_original_perm == KMemoryPermission_None);
+                MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == 0);
 
-                this->memory_state = s;
-                this->perm = p;
-                this->attribute = static_cast<KMemoryAttribute>(a | (this->attribute & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)));
+                m_memory_state = s;
+                m_perm = p;
+                m_attribute = static_cast<KMemoryAttribute>(a | (m_attribute & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)));
 
                 if (set_disable_merge_attr && set_mask != 0) {
-                    this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute | set_mask);
+                    m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | set_mask);
                 }
                 if (clear_mask != 0) {
-                    this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute & ~clear_mask);
+                    m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~clear_mask);
                 }
             }
 

@@ -432,25 +432,25 @@ namespace ams::kern {
                 MESOSPHERE_ASSERT(this->Contains(addr));
                 MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize));
 
-                block->address = this->address;
-                block->num_pages = (addr - this->GetAddress()) / PageSize;
-                block->memory_state = this->memory_state;
-                block->ipc_lock_count = this->ipc_lock_count;
-                block->device_use_count = this->device_use_count;
-                block->perm = this->perm;
-                block->original_perm = this->original_perm;
-                block->attribute = this->attribute;
-                block->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft);
-                block->ipc_disable_merge_count = this->ipc_disable_merge_count;
-                block->device_disable_merge_left_count = this->device_disable_merge_left_count;
-                block->device_disable_merge_right_count = 0;
+                block->m_address = m_address;
+                block->m_num_pages = (addr - this->GetAddress()) / PageSize;
+                block->m_memory_state = m_memory_state;
+                block->m_ipc_lock_count = m_ipc_lock_count;
+                block->m_device_use_count = m_device_use_count;
+                block->m_perm = m_perm;
+                block->m_original_perm = m_original_perm;
+                block->m_attribute = m_attribute;
+                block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft);
+                block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
+                block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
+                block->m_device_disable_merge_right_count = 0;
 
-                this->address = addr;
-                this->num_pages -= block->num_pages;
+                m_address = addr;
+                m_num_pages -= block->m_num_pages;
 
-                this->ipc_disable_merge_count = 0;
-                this->device_disable_merge_left_count = 0;
-                this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight);
+                m_ipc_disable_merge_count = 0;
+                m_device_disable_merge_left_count = 0;
+                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight);
             }
 
             constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left, bool right) {
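The split above carves a block in two at a page-aligned address: the new block takes the left portion along with the left-facing merge bookkeeping, while this block shrinks to the right portion and keeps only right-facing state. The page arithmetic is simple enough to show in isolation; a sketch of how the counts divide, with made-up field names:

    #include <cassert>
    #include <cstdint>

    constexpr size_t PageSize = 0x1000;

    struct Range {
        uintptr_t address;
        size_t    num_pages;
    };

    /* Split 'whole' at 'addr': the result keeps [address, addr), whole keeps [addr, end). */
    constexpr Range SplitLeft(Range &whole, uintptr_t addr) {
        Range left{whole.address, (addr - whole.address) / PageSize};
        whole.address = addr;
        whole.num_pages -= left.num_pages;
        return left;
    }

    int main() {
        Range r{0x10000, 8};                      /* 8 pages at 0x10000 */
        Range left = SplitLeft(r, 0x13000);       /* cut at the 3rd page boundary */
        assert(left.address == 0x10000 && left.num_pages == 3);
        assert(r.address == 0x13000 && r.num_pages == 5);
    }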
@@ -458,8 +458,8 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm, right);
 
                 if (left) {
-                    this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceLeft);
-                    const u16 new_device_disable_merge_left_count = ++this->device_disable_merge_left_count;
+                    m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceLeft);
+                    const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
                     MESOSPHERE_ABORT_UNLESS(new_device_disable_merge_left_count > 0);
                 }
             }

@@ -469,8 +469,8 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm, left);
 
                 if (right) {
-                    this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceRight);
-                    const u16 new_device_disable_merge_right_count = ++this->device_disable_merge_right_count;
+                    m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceRight);
+                    const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
                     MESOSPHERE_ABORT_UNLESS(new_device_disable_merge_right_count > 0);
                 }
             }

@@ -485,13 +485,13 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm);
 
                 /* We must either be shared or have a zero lock count. */
-                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared || this->device_use_count == 0);
+                MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared || m_device_use_count == 0);
 
                 /* Share. */
-                const u16 new_count = ++this->device_use_count;
+                const u16 new_count = ++m_device_use_count;
                 MESOSPHERE_ABORT_UNLESS(new_count > 0);
 
-                this->attribute = static_cast<KMemoryAttribute>(this->attribute | KMemoryAttribute_DeviceShared);
+                m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute_DeviceShared);
 
                 this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
             }

@@ -501,16 +501,16 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm, right);
 
                 if (left) {
-                    if (!this->device_disable_merge_left_count) {
+                    if (!m_device_disable_merge_left_count) {
                         return;
                     }
-                    --this->device_disable_merge_left_count;
+                    --m_device_disable_merge_left_count;
                 }
 
-                this->device_disable_merge_left_count = std::min(this->device_disable_merge_left_count, this->device_use_count);
+                m_device_disable_merge_left_count = std::min(m_device_disable_merge_left_count, m_device_use_count);
 
-                if (this->device_disable_merge_left_count == 0) {
-                    this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceLeft);
+                if (m_device_disable_merge_left_count == 0) {
+                    m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceLeft);
                 }
             }
 

@@ -519,10 +519,10 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm, left);
 
                 if (right) {
-                    const u16 old_device_disable_merge_right_count = this->device_disable_merge_right_count--;
+                    const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
                     MESOSPHERE_ASSERT(old_device_disable_merge_right_count > 0);
                     if (old_device_disable_merge_right_count == 1) {
-                        this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceRight);
+                        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceRight);
                     }
                 }
             }

@@ -537,14 +537,14 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm);
 
                 /* We must be shared. */
-                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared);
+                MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared);
 
                 /* Unhare. */
-                const u16 old_count = this->device_use_count--;
+                const u16 old_count = m_device_use_count--;
                 MESOSPHERE_ABORT_UNLESS(old_count > 0);
 
                 if (old_count == 1) {
-                    this->attribute = static_cast<KMemoryAttribute>(this->attribute & ~KMemoryAttribute_DeviceShared);
+                    m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute_DeviceShared);
                 }
 
                 this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);

@@ -555,14 +555,14 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm);
 
                 /* We must be shared. */
-                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared);
+                MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared);
 
                 /* Unhare. */
-                const u16 old_count = this->device_use_count--;
+                const u16 old_count = m_device_use_count--;
                 MESOSPHERE_ABORT_UNLESS(old_count > 0);
 
                 if (old_count == 1) {
-                    this->attribute = static_cast<KMemoryAttribute>(this->attribute & ~KMemoryAttribute_DeviceShared);
+                    m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute_DeviceShared);
                 }
 
                 this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);

@@ -570,25 +570,25 @@ namespace ams::kern {
 
             constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) {
                 /* We must either be locked or have a zero lock count. */
-                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked || this->ipc_lock_count == 0);
+                MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked || m_ipc_lock_count == 0);
 
                 /* Lock. */
-                const u16 new_lock_count = ++this->ipc_lock_count;
+                const u16 new_lock_count = ++m_ipc_lock_count;
                 MESOSPHERE_ABORT_UNLESS(new_lock_count > 0);
 
                 /* If this is our first lock, update our permissions. */
                 if (new_lock_count == 1) {
-                    MESOSPHERE_ASSERT(this->original_perm == KMemoryPermission_None);
-                    MESOSPHERE_ASSERT((this->perm | new_perm | KMemoryPermission_NotMapped) == (this->perm | KMemoryPermission_NotMapped));
-                    MESOSPHERE_ASSERT((this->perm & KMemoryPermission_UserExecute) != KMemoryPermission_UserExecute || (new_perm == KMemoryPermission_UserRead));
-                    this->original_perm = this->perm;
-                    this->perm = static_cast<KMemoryPermission>((new_perm & KMemoryPermission_IpcLockChangeMask) | (this->original_perm & ~KMemoryPermission_IpcLockChangeMask));
+                    MESOSPHERE_ASSERT(m_original_perm == KMemoryPermission_None);
+                    MESOSPHERE_ASSERT((m_perm | new_perm | KMemoryPermission_NotMapped) == (m_perm | KMemoryPermission_NotMapped));
+                    MESOSPHERE_ASSERT((m_perm & KMemoryPermission_UserExecute) != KMemoryPermission_UserExecute || (new_perm == KMemoryPermission_UserRead));
+                    m_original_perm = m_perm;
+                    m_perm = static_cast<KMemoryPermission>((new_perm & KMemoryPermission_IpcLockChangeMask) | (m_original_perm & ~KMemoryPermission_IpcLockChangeMask));
                 }
-                this->attribute = static_cast<KMemoryAttribute>(this->attribute | KMemoryAttribute_IpcLocked);
+                m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute_IpcLocked);
 
                 if (left) {
-                    this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute | KMemoryBlockDisableMergeAttribute_IpcLeft);
-                    const u16 new_ipc_disable_merge_count = ++this->ipc_disable_merge_count;
+                    m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_IpcLeft);
+                    const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
                     MESOSPHERE_ABORT_UNLESS(new_ipc_disable_merge_count > 0);
                 }
                 MESOSPHERE_UNUSED(right);

@@ -599,32 +599,32 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(new_perm);
 
                 /* We must be locked. */
-                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked);
+                MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked);
 
                 /* Unlock. */
-                const u16 old_lock_count = this->ipc_lock_count--;
+                const u16 old_lock_count = m_ipc_lock_count--;
                 MESOSPHERE_ABORT_UNLESS(old_lock_count > 0);
 
                 /* If this is our last unlock, update our permissions. */
                 if (old_lock_count == 1) {
-                    MESOSPHERE_ASSERT(this->original_perm != KMemoryPermission_None);
-                    this->perm = this->original_perm;
-                    this->original_perm = KMemoryPermission_None;
-                    this->attribute = static_cast<KMemoryAttribute>(this->attribute & ~KMemoryAttribute_IpcLocked);
+                    MESOSPHERE_ASSERT(m_original_perm != KMemoryPermission_None);
+                    m_perm = m_original_perm;
+                    m_original_perm = KMemoryPermission_None;
+                    m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute_IpcLocked);
                 }
 
                 if (left) {
-                    const u16 old_ipc_disable_merge_count = this->ipc_disable_merge_count--;
+                    const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
                     MESOSPHERE_ASSERT(old_ipc_disable_merge_count > 0);
                     if (old_ipc_disable_merge_count == 1) {
-                        this->disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(this->disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_IpcLeft);
+                        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_IpcLeft);
                     }
                 }
                 MESOSPHERE_UNUSED(right);
             }
 
             constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
-                return this->disable_merge_attribute;
+                return m_disable_merge_attribute;
             }
     };
     static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
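LockForIpc and UnlockForIpc implement a save-and-restore permission overlay: the first lock stashes the current permission in the original-permission slot and installs the IPC-restricted one; the last unlock swaps it back. A standalone sketch of that first-lock/last-unlock pattern, with mask values invented for illustration:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t Perm_None  = 0;
    constexpr uint32_t ChangeMask = 0x3; /* bits IPC is allowed to rewrite */

    struct Block {
        uint32_t perm          = 0x7;
        uint32_t original_perm = Perm_None;
        uint16_t lock_count    = 0;

        void LockForIpc(uint32_t new_perm) {
            if (++lock_count == 1) {
                /* First lock: remember the real permission, install the overlay. */
                original_perm = perm;
                perm = (new_perm & ChangeMask) | (original_perm & ~ChangeMask);
            }
        }

        void UnlockForIpc() {
            assert(lock_count > 0);
            if (lock_count-- == 1) {
                /* Last unlock: restore what we saved. */
                perm = original_perm;
                original_perm = Perm_None;
            }
        }
    };

    int main() {
        Block b;
        b.LockForIpc(0x1);          /* read-only overlay */
        assert(b.perm == (0x1 | (0x7 & ~ChangeMask)));
        b.LockForIpc(0x1);          /* nested lock keeps the overlay */
        b.UnlockForIpc();
        b.UnlockForIpc();
        assert(b.perm == 0x7 && b.original_perm == Perm_None);
    }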
@@ -24,16 +24,16 @@ namespace ams::kern {
         public:
             static constexpr size_t MaxBlocks = 2;
         private:
-            KMemoryBlock *blocks[MaxBlocks];
-            size_t index;
-            KMemoryBlockSlabManager *slab_manager;
+            KMemoryBlock *m_blocks[MaxBlocks];
+            size_t m_index;
+            KMemoryBlockSlabManager *m_slab_manager;
         public:
-            constexpr explicit KMemoryBlockManagerUpdateAllocator(KMemoryBlockSlabManager *sm) : blocks(), index(MaxBlocks), slab_manager(sm) { /* ... */ }
+            constexpr explicit KMemoryBlockManagerUpdateAllocator(KMemoryBlockSlabManager *sm) : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) { /* ... */ }
 
             ~KMemoryBlockManagerUpdateAllocator() {
-                for (const auto &block : this->blocks) {
+                for (const auto &block : m_blocks) {
                     if (block != nullptr) {
-                        this->slab_manager->Free(block);
+                        m_slab_manager->Free(block);
                     }
                 }
             }

@@ -43,32 +43,32 @@ namespace ams::kern {
                 MESOSPHERE_ASSERT(num_blocks <= MaxBlocks);
 
                 /* Set index. */
-                this->index = MaxBlocks - num_blocks;
+                m_index = MaxBlocks - num_blocks;
 
                 /* Allocate the blocks. */
                 for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
-                    this->blocks[this->index + i] = this->slab_manager->Allocate();
-                    R_UNLESS(this->blocks[this->index + i] != nullptr, svc::ResultOutOfResource());
+                    m_blocks[m_index + i] = m_slab_manager->Allocate();
+                    R_UNLESS(m_blocks[m_index + i] != nullptr, svc::ResultOutOfResource());
                 }
 
                 return ResultSuccess();
             }
 
             KMemoryBlock *Allocate() {
-                MESOSPHERE_ABORT_UNLESS(this->index < MaxBlocks);
-                MESOSPHERE_ABORT_UNLESS(this->blocks[this->index] != nullptr);
+                MESOSPHERE_ABORT_UNLESS(m_index < MaxBlocks);
+                MESOSPHERE_ABORT_UNLESS(m_blocks[m_index] != nullptr);
                 KMemoryBlock *block = nullptr;
-                std::swap(block, this->blocks[this->index++]);
+                std::swap(block, m_blocks[m_index++]);
                 return block;
             }
 
             void Free(KMemoryBlock *block) {
-                MESOSPHERE_ABORT_UNLESS(this->index <= MaxBlocks);
+                MESOSPHERE_ABORT_UNLESS(m_index <= MaxBlocks);
                 MESOSPHERE_ABORT_UNLESS(block != nullptr);
-                if (this->index == 0) {
-                    this->slab_manager->Free(block);
+                if (m_index == 0) {
+                    m_slab_manager->Free(block);
                 } else {
-                    this->blocks[--this->index] = block;
+                    m_blocks[--m_index] = block;
                 }
            }
    };
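The update allocator exists so a memory-state change can reserve its worst case (a split on each end, hence MaxBlocks = 2) before touching the tree; once allocation has succeeded the update itself cannot fail, and the destructor returns whatever was not consumed. A sketch of that reserve-then-commit pattern with plain new/delete standing in for the slab manager:

    #include <cstddef>
    #include <new>

    struct Block { /* ... */ };

    class UpdateAllocator {
        private:
            static constexpr size_t MaxBlocks = 2; /* worst case: split left + split right */
            Block *m_blocks[MaxBlocks] = {};
            size_t m_index = MaxBlocks;
        public:
            /* Reserve everything up front; failure here leaves the tree untouched. */
            bool Initialize(size_t num_blocks) {
                m_index = MaxBlocks - num_blocks;
                for (size_t i = 0; i < num_blocks; ++i) {
                    if ((m_blocks[m_index + i] = new (std::nothrow) Block()) == nullptr) {
                        return false;
                    }
                }
                return true;
            }

            /* Hand out a reserved block; cannot fail once Initialize succeeded. */
            Block *Allocate() {
                Block *b = m_blocks[m_index];
                m_blocks[m_index++] = nullptr;
                return b;
            }

            /* Unused reservations are returned automatically. */
            ~UpdateAllocator() {
                for (Block *b : m_blocks) {
                    delete b;
                }
            }
    };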
@@ -80,17 +80,17 @@ namespace ams::kern {
             using iterator       = MemoryBlockTree::iterator;
             using const_iterator = MemoryBlockTree::const_iterator;
         private:
-            MemoryBlockTree memory_block_tree;
-            KProcessAddress start_address;
-            KProcessAddress end_address;
+            MemoryBlockTree m_memory_block_tree;
+            KProcessAddress m_start_address;
+            KProcessAddress m_end_address;
         private:
             void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages);
         public:
-            constexpr KMemoryBlockManager() : memory_block_tree(), start_address(), end_address() { /* ... */ }
+            constexpr KMemoryBlockManager() : m_memory_block_tree(), m_start_address(), m_end_address() { /* ... */ }
 
-            iterator end() { return this->memory_block_tree.end(); }
-            const_iterator end() const { return this->memory_block_tree.end(); }
-            const_iterator cend() const { return this->memory_block_tree.cend(); }
+            iterator end() { return m_memory_block_tree.end(); }
+            const_iterator end() const { return m_memory_block_tree.end(); }
+            const_iterator cend() const { return m_memory_block_tree.cend(); }
 
             Result Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager);
             void Finalize(KMemoryBlockSlabManager *slab_manager);

@@ -103,11 +103,11 @@ namespace ams::kern {
             void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
 
             iterator FindIterator(KProcessAddress address) const {
-                return this->memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None));
+                return m_memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None));
             }
 
             const KMemoryBlock *FindBlock(KProcessAddress address) const {
-                if (const_iterator it = this->FindIterator(address); it != this->memory_block_tree.end()) {
+                if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
                     return std::addressof(*it);
                 }
 

@@ -121,11 +121,11 @@ namespace ams::kern {
 
     class KScopedMemoryBlockManagerAuditor {
         private:
-            KMemoryBlockManager *manager;
+            KMemoryBlockManager *m_manager;
         public:
-            explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager *m) : manager(m) { MESOSPHERE_AUDIT(this->manager->CheckState()); }
+            explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager *m) : m_manager(m) { MESOSPHERE_AUDIT(m_manager->CheckState()); }
             explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager &m) : KScopedMemoryBlockManagerAuditor(std::addressof(m)) { /* ... */ }
-            ALWAYS_INLINE ~KScopedMemoryBlockManagerAuditor() { MESOSPHERE_AUDIT(this->manager->CheckState()); }
+            ALWAYS_INLINE ~KScopedMemoryBlockManagerAuditor() { MESOSPHERE_AUDIT(m_manager->CheckState()); }
     };
 
 }
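KScopedMemoryBlockManagerAuditor is a pure RAII invariant check: construct it at the top of any mutating operation and the manager's consistency is audited both before and after the mutation, with zero cost when audits are compiled out. A generic sketch of the guard:

    #include <cassert>

    struct Manager {
        bool CheckState() const { return true; /* verify tree invariants here */ }
    };

    class ScopedAuditor {
        private:
            Manager *m_manager;
        public:
            /* Audit on entry... */
            explicit ScopedAuditor(Manager &m) : m_manager(&m) { assert(m_manager->CheckState()); }
            /* ...and again on every exit path, including early returns. */
            ~ScopedAuditor() { assert(m_manager->CheckState()); }
    };

    void MutateSomething(Manager &m) {
        ScopedAuditor auditor(m);
        /* ... perform the update; any return path re-checks the invariants ... */
    }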
@@ -61,50 +61,50 @@ namespace ams::kern {
                     return (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
                 }
             private:
-                KPageHeap heap;
-                RefCount *page_reference_counts;
-                KVirtualAddress management_region;
-                Pool pool;
-                Impl *next;
-                Impl *prev;
+                KPageHeap m_heap;
+                RefCount *m_page_reference_counts;
+                KVirtualAddress m_management_region;
+                Pool m_pool;
+                Impl *m_next;
+                Impl *m_prev;
             public:
-                Impl() : heap(), page_reference_counts(), management_region(), pool(), next(), prev() { /* ... */ }
+                Impl() : m_heap(), m_page_reference_counts(), m_management_region(), m_pool(), m_next(), m_prev() { /* ... */ }
 
                 size_t Initialize(uintptr_t address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
 
-                KVirtualAddress AllocateBlock(s32 index, bool random) { return this->heap.AllocateBlock(index, random); }
-                void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); }
+                KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
+                void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
 
-                void UpdateUsedHeapSize() { this->heap.UpdateUsedSize(); }
+                void UpdateUsedHeapSize() { m_heap.UpdateUsedSize(); }
 
-                void InitializeOptimizedMemory() { std::memset(GetVoidPointer(this->management_region), 0, CalculateOptimizedProcessOverheadSize(this->heap.GetSize())); }
+                void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); }
 
                 void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages);
                 void TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages);
 
                 bool ProcessOptimizedAllocation(KVirtualAddress block, size_t num_pages, u8 fill_pattern);
 
-                constexpr Pool GetPool() const { return this->pool; }
-                constexpr size_t GetSize() const { return this->heap.GetSize(); }
-                constexpr KVirtualAddress GetEndAddress() const { return this->heap.GetEndAddress(); }
+                constexpr Pool GetPool() const { return m_pool; }
+                constexpr size_t GetSize() const { return m_heap.GetSize(); }
+                constexpr KVirtualAddress GetEndAddress() const { return m_heap.GetEndAddress(); }
 
-                size_t GetFreeSize() const { return this->heap.GetFreeSize(); }
+                size_t GetFreeSize() const { return m_heap.GetFreeSize(); }
 
-                void DumpFreeList() const { return this->heap.DumpFreeList(); }
+                void DumpFreeList() const { return m_heap.DumpFreeList(); }
 
-                constexpr size_t GetPageOffset(KVirtualAddress address) const { return this->heap.GetPageOffset(address); }
-                constexpr size_t GetPageOffsetToEnd(KVirtualAddress address) const { return this->heap.GetPageOffsetToEnd(address); }
+                constexpr size_t GetPageOffset(KVirtualAddress address) const { return m_heap.GetPageOffset(address); }
+                constexpr size_t GetPageOffsetToEnd(KVirtualAddress address) const { return m_heap.GetPageOffsetToEnd(address); }
 
-                constexpr void SetNext(Impl *n) { this->next = n; }
-                constexpr void SetPrev(Impl *n) { this->prev = n; }
-                constexpr Impl *GetNext() const { return this->next; }
-                constexpr Impl *GetPrev() const { return this->prev; }
+                constexpr void SetNext(Impl *n) { m_next = n; }
+                constexpr void SetPrev(Impl *n) { m_prev = n; }
+                constexpr Impl *GetNext() const { return m_next; }
+                constexpr Impl *GetPrev() const { return m_prev; }
 
                 void OpenFirst(KVirtualAddress address, size_t num_pages) {
                     size_t index = this->GetPageOffset(address);
                     const size_t end = index + num_pages;
                     while (index < end) {
-                        const RefCount ref_count = (++this->page_reference_counts[index]);
+                        const RefCount ref_count = (++m_page_reference_counts[index]);
                         MESOSPHERE_ABORT_UNLESS(ref_count == 1);
 
                         index++;

@@ -115,7 +115,7 @@ namespace ams::kern {
                     size_t index = this->GetPageOffset(address);
                     const size_t end = index + num_pages;
                     while (index < end) {
-                        const RefCount ref_count = (++this->page_reference_counts[index]);
+                        const RefCount ref_count = (++m_page_reference_counts[index]);
                         MESOSPHERE_ABORT_UNLESS(ref_count > 1);
 
                         index++;

@@ -129,8 +129,8 @@ namespace ams::kern {
                     size_t free_start = 0;
                     size_t free_count = 0;
                     while (index < end) {
-                        MESOSPHERE_ABORT_UNLESS(this->page_reference_counts[index] > 0);
-                        const RefCount ref_count = (--this->page_reference_counts[index]);
+                        MESOSPHERE_ABORT_UNLESS(m_page_reference_counts[index] > 0);
+                        const RefCount ref_count = (--m_page_reference_counts[index]);
 
                         /* Keep track of how many zero refcounts we see in a row, to minimize calls to free. */
                         if (ref_count == 0) {

@@ -142,7 +142,7 @@ namespace ams::kern {
                            }
                        } else {
                            if (free_count > 0) {
-                                this->Free(this->heap.GetAddress() + free_start * PageSize, free_count);
+                                this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
                                free_count = 0;
                            }
                        }
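Close decrements per-page reference counts but batches the actual frees: it accumulates runs of pages that just hit zero and hands each contiguous run to the heap in a single call. A sketch of that run-batching loop, with the refcount array and free function as stand-ins:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using RefCount = uint16_t;

    /* Stand-in for handing a contiguous page run back to the page heap. */
    void FreeRun(size_t first_page, size_t count) { /* ... */ (void)first_page; (void)count; }

    void ClosePages(std::vector<RefCount> &refs, size_t index, size_t num_pages) {
        const size_t end = index + num_pages;
        size_t free_start = 0;
        size_t free_count = 0;
        while (index < end) {
            const RefCount ref_count = --refs[index];
            if (ref_count == 0) {
                /* Extend the current run of newly-free pages. */
                if (free_count > 0) {
                    ++free_count;
                } else {
                    free_start = index;
                    free_count = 1;
                }
            } else if (free_count > 0) {
                /* Run broken by a still-referenced page: flush it in one call. */
                FreeRun(free_start, free_count);
                free_count = 0;
            }
            ++index;
        }
        if (free_count > 0) {
            FreeRun(free_start, free_count); /* flush the trailing run */
        }
    }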
@@ -151,25 +151,25 @@ namespace ams::kern {
                    }
 
                     if (free_count > 0) {
-                        this->Free(this->heap.GetAddress() + free_start * PageSize, free_count);
+                        this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
                    }
                }
        };
        private:
-            KLightLock pool_locks[Pool_Count];
-            Impl *pool_managers_head[Pool_Count];
-            Impl *pool_managers_tail[Pool_Count];
-            Impl managers[MaxManagerCount];
-            size_t num_managers;
-            u64 optimized_process_ids[Pool_Count];
-            bool has_optimized_process[Pool_Count];
+            KLightLock m_pool_locks[Pool_Count];
+            Impl *m_pool_managers_head[Pool_Count];
+            Impl *m_pool_managers_tail[Pool_Count];
+            Impl m_managers[MaxManagerCount];
+            size_t m_num_managers;
+            u64 m_optimized_process_ids[Pool_Count];
+            bool m_has_optimized_process[Pool_Count];
        private:
            Impl &GetManager(KVirtualAddress address) {
-                return this->managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+                return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
            }
 
            constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
-                return dir == Direction_FromBack ? this->pool_managers_tail[pool] : this->pool_managers_head[pool];
+                return dir == Direction_FromBack ? m_pool_managers_tail[pool] : m_pool_managers_head[pool];
            }
 
            constexpr Impl *GetNextManager(Impl *cur, Direction dir) {

@@ -183,7 +183,7 @@ namespace ams::kern {
             Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random);
         public:
             KMemoryManager()
-                : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process()
+                : m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
             {
                 /* ... */
             }

@@ -204,7 +204,7 @@ namespace ams::kern {
                 const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
 
                 {
-                    KScopedLightLock lk(this->pool_locks[manager.GetPool()]);
+                    KScopedLightLock lk(m_pool_locks[manager.GetPool()]);
                     manager.Open(address, cur_pages);
                 }
 

@@ -220,7 +220,7 @@ namespace ams::kern {
                 const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
 
                 {
-                    KScopedLightLock lk(this->pool_locks[manager.GetPool()]);
+                    KScopedLightLock lk(m_pool_locks[manager.GetPool()]);
                     manager.Close(address, cur_pages);
                 }
 

@@ -231,8 +231,8 @@ namespace ams::kern {
 
             size_t GetSize() {
                 size_t total = 0;
-                for (size_t i = 0; i < this->num_managers; i++) {
-                    total += this->managers[i].GetSize();
+                for (size_t i = 0; i < m_num_managers; i++) {
+                    total += m_managers[i].GetSize();
                 }
                 return total;
             }

@@ -248,15 +248,15 @@ namespace ams::kern {
 
             size_t GetFreeSize() {
                 size_t total = 0;
-                for (size_t i = 0; i < this->num_managers; i++) {
-                    KScopedLightLock lk(this->pool_locks[this->managers[i].GetPool()]);
-                    total += this->managers[i].GetFreeSize();
+                for (size_t i = 0; i < m_num_managers; i++) {
+                    KScopedLightLock lk(m_pool_locks[m_managers[i].GetPool()]);
+                    total += m_managers[i].GetFreeSize();
                 }
                 return total;
             }
 
             size_t GetFreeSize(Pool pool) {
-                KScopedLightLock lk(this->pool_locks[pool]);
+                KScopedLightLock lk(m_pool_locks[pool]);
 
                 constexpr Direction GetSizeDirection = Direction_FromFront;
                 size_t total = 0;

@@ -267,7 +267,7 @@ namespace ams::kern {
             }
 
             void DumpFreeList(Pool pool) {
-                KScopedLightLock lk(this->pool_locks[pool]);
+                KScopedLightLock lk(m_pool_locks[pool]);
 
                 constexpr Direction DumpDirection = Direction_FromFront;
                 for (auto *manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr; manager = this->GetNextManager(manager, DumpDirection)) {
@ -27,11 +27,11 @@ namespace ams::kern {
private:
friend class KMemoryRegionTree;
private:
uintptr_t address;
uintptr_t m_address;
uintptr_t pair_address;
uintptr_t m_pair_address;
uintptr_t last_address;
uintptr_t m_last_address;
u32 attributes;
u32 m_attributes;
u32 type_id;
u32 m_type_id;
public:
static constexpr ALWAYS_INLINE int Compare(const KMemoryRegion &lhs, const KMemoryRegion &rhs) {
if (lhs.GetAddress() < rhs.GetAddress()) {

@ -43,32 +43,32 @@ namespace ams::kern {
}
}
public:
constexpr ALWAYS_INLINE KMemoryRegion() : address(0), pair_address(0), last_address(0), attributes(0), type_id(0) { /* ... */ }
constexpr ALWAYS_INLINE KMemoryRegion() : m_address(0), m_pair_address(0), m_last_address(0), m_attributes(0), m_type_id(0) { /* ... */ }
constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t la, uintptr_t p, u32 r, u32 t) :
address(a), pair_address(p), last_address(la), attributes(r), type_id(t)
m_address(a), m_pair_address(p), m_last_address(la), m_attributes(r), m_type_id(t)
{
/* ... */
}
constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t la, u32 r, u32 t) : KMemoryRegion(a, la, std::numeric_limits<uintptr_t>::max(), r, t) { /* ... */ }
private:
constexpr ALWAYS_INLINE void Reset(uintptr_t a, uintptr_t la, uintptr_t p, u32 r, u32 t) {
this->address = a;
m_address = a;
this->pair_address = p;
m_pair_address = p;
this->last_address = la;
m_last_address = la;
this->attributes = r;
m_attributes = r;
this->type_id = t;
m_type_id = t;
}
public:
constexpr ALWAYS_INLINE uintptr_t GetAddress() const {
return this->address;
return m_address;
}

constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const {
return this->pair_address;
return m_pair_address;
}

constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const {
return this->last_address;
return m_last_address;
}

constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const {

@ -80,16 +80,16 @@ namespace ams::kern {
}

constexpr ALWAYS_INLINE u32 GetAttributes() const {
return this->attributes;
return m_attributes;
}

constexpr ALWAYS_INLINE u32 GetType() const {
return this->type_id;
return m_type_id;
}

constexpr ALWAYS_INLINE void SetType(u32 type) {
MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type));
this->type_id = type;
m_type_id = type;
}

constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const {

@ -110,11 +110,11 @@ namespace ams::kern {
}

constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) {
this->pair_address = a;
m_pair_address = a;
}

constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionAttr attr) {
this->type_id |= attr;
m_type_id |= attr;
}
};
static_assert(std::is_trivially_destructible<KMemoryRegion>::value);

@ -156,9 +156,9 @@ namespace ams::kern {
using iterator = TreeType::iterator;
using const_iterator = TreeType::const_iterator;
private:
TreeType tree;
TreeType m_tree;
public:
constexpr ALWAYS_INLINE KMemoryRegionTree() : tree() { /* ... */ }
constexpr ALWAYS_INLINE KMemoryRegionTree() : m_tree() { /* ... */ }
public:
KMemoryRegion *FindModifiable(uintptr_t address) {
if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) {

@ -246,19 +246,19 @@ namespace ams::kern {
public:
/* Iterator accessors. */
iterator begin() {
return this->tree.begin();
return m_tree.begin();
}

const_iterator begin() const {
return this->tree.begin();
return m_tree.begin();
}

iterator end() {
return this->tree.end();
return m_tree.end();
}

const_iterator end() const {
return this->tree.end();
return m_tree.end();
}

const_iterator cbegin() const {

@ -270,49 +270,49 @@ namespace ams::kern {
}

iterator iterator_to(reference ref) {
return this->tree.iterator_to(ref);
return m_tree.iterator_to(ref);
}

const_iterator iterator_to(const_reference ref) const {
return this->tree.iterator_to(ref);
return m_tree.iterator_to(ref);
}

/* Content management. */
bool empty() const {
return this->tree.empty();
return m_tree.empty();
}

reference back() {
return this->tree.back();
return m_tree.back();
}

const_reference back() const {
return this->tree.back();
return m_tree.back();
}

reference front() {
return this->tree.front();
return m_tree.front();
}

const_reference front() const {
return this->tree.front();
return m_tree.front();
}

/* GCC over-eagerly inlines this operation. */
NOINLINE iterator insert(reference ref) {
return this->tree.insert(ref);
return m_tree.insert(ref);
}

NOINLINE iterator erase(iterator it) {
return this->tree.erase(it);
return m_tree.erase(it);
}

iterator find(const_reference ref) const {
return this->tree.find(ref);
return m_tree.find(ref);
}

iterator nfind(const_reference ref) const {
return this->tree.nfind(ref);
return m_tree.nfind(ref);
}
};

@ -52,73 +52,73 @@ namespace ams::kern {
private:
using ValueType = typename std::underlying_type<KMemoryRegionType>::type;
private:
ValueType value;
ValueType m_value;
size_t next_bit;
size_t m_next_bit;
bool finalized;
bool m_finalized;
bool sparse_only;
bool m_sparse_only;
bool dense_only;
bool m_dense_only;
private:
consteval KMemoryRegionTypeValue(ValueType v) : value(v), next_bit(0), finalized(false), sparse_only(false), dense_only(false) { /* ... */ }
consteval KMemoryRegionTypeValue(ValueType v) : m_value(v), m_next_bit(0), m_finalized(false), m_sparse_only(false), m_dense_only(false) { /* ... */ }
public:
consteval KMemoryRegionTypeValue() : KMemoryRegionTypeValue(0) { /* ... */ }

consteval operator KMemoryRegionType() const { return static_cast<KMemoryRegionType>(this->value); }
consteval operator KMemoryRegionType() const { return static_cast<KMemoryRegionType>(m_value); }
consteval ValueType GetValue() const { return this->value; }
consteval ValueType GetValue() const { return m_value; }

consteval const KMemoryRegionTypeValue &Finalize() { this->finalized = true; return *this; }
consteval const KMemoryRegionTypeValue &Finalize() { m_finalized = true; return *this; }
consteval const KMemoryRegionTypeValue &SetSparseOnly() { this->sparse_only = true; return *this; }
consteval const KMemoryRegionTypeValue &SetSparseOnly() { m_sparse_only = true; return *this; }
consteval const KMemoryRegionTypeValue &SetDenseOnly() { this->dense_only = true; return *this; }
consteval const KMemoryRegionTypeValue &SetDenseOnly() { m_dense_only = true; return *this; }

consteval KMemoryRegionTypeValue &SetAttribute(KMemoryRegionAttr attr) { AMS_ASSUME(!this->finalized); this->value |= attr; return *this; }
consteval KMemoryRegionTypeValue &SetAttribute(KMemoryRegionAttr attr) { AMS_ASSUME(!m_finalized); m_value |= attr; return *this; }

consteval KMemoryRegionTypeValue DeriveInitial(size_t i, size_t next = BITSIZEOF(ValueType)) const {
AMS_ASSUME(!this->finalized);
AMS_ASSUME(!m_finalized);
AMS_ASSUME(!this->value);
AMS_ASSUME(!m_value);
AMS_ASSUME(!this->next_bit);
AMS_ASSUME(!m_next_bit);
AMS_ASSUME(next > i);

KMemoryRegionTypeValue new_type = *this;
new_type.value = (ValueType{1} << i);
new_type.m_value = (ValueType{1} << i);
new_type.next_bit = next;
new_type.m_next_bit = next;
return new_type;
}

consteval KMemoryRegionTypeValue DeriveAttribute(KMemoryRegionAttr attr) const {
AMS_ASSUME(!this->finalized);
AMS_ASSUME(!m_finalized);

KMemoryRegionTypeValue new_type = *this;
new_type.value |= attr;
new_type.m_value |= attr;
return new_type;
}

consteval KMemoryRegionTypeValue DeriveTransition(size_t ofs = 0, size_t adv = 1) const {
AMS_ASSUME(!this->finalized);
AMS_ASSUME(!m_finalized);
AMS_ASSUME(ofs < adv);
AMS_ASSUME(this->next_bit + adv <= BITSIZEOF(ValueType));
AMS_ASSUME(m_next_bit + adv <= BITSIZEOF(ValueType));

KMemoryRegionTypeValue new_type = *this;
new_type.value |= (ValueType{1} << (this->next_bit + ofs));
new_type.m_value |= (ValueType{1} << (m_next_bit + ofs));
new_type.next_bit += adv;
new_type.m_next_bit += adv;
return new_type;
}

consteval KMemoryRegionTypeValue DeriveSparse(size_t ofs, size_t n, size_t i) const {
AMS_ASSUME(!this->finalized);
AMS_ASSUME(!m_finalized);
AMS_ASSUME(!this->dense_only);
AMS_ASSUME(!m_dense_only);
AMS_ASSUME(this->next_bit + ofs + n + 1 <= BITSIZEOF(ValueType));
AMS_ASSUME(m_next_bit + ofs + n + 1 <= BITSIZEOF(ValueType));
AMS_ASSUME(i < n);

KMemoryRegionTypeValue new_type = *this;
new_type.value |= (ValueType{1} << (this->next_bit + ofs));
new_type.m_value |= (ValueType{1} << (m_next_bit + ofs));
new_type.value |= (ValueType{1} << (this->next_bit + ofs + 1 + i));
new_type.m_value |= (ValueType{1} << (m_next_bit + ofs + 1 + i));
new_type.next_bit += ofs + n + 1;
new_type.m_next_bit += ofs + n + 1;
return new_type;
}

consteval KMemoryRegionTypeValue Derive(size_t n, size_t i) const {
AMS_ASSUME(!this->finalized);
AMS_ASSUME(!m_finalized);
AMS_ASSUME(!this->sparse_only);
AMS_ASSUME(!m_sparse_only);
AMS_ASSUME(this->next_bit + BitsForDeriveDense(n) <= BITSIZEOF(ValueType));
AMS_ASSUME(m_next_bit + BitsForDeriveDense(n) <= BITSIZEOF(ValueType));
AMS_ASSUME(i < n);

size_t low = 0, high = 1;

@ -132,23 +132,23 @@ namespace ams::kern {

KMemoryRegionTypeValue new_type = *this;
new_type.value |= (ValueType{1} << (this->next_bit + low));
new_type.m_value |= (ValueType{1} << (m_next_bit + low));
new_type.value |= (ValueType{1} << (this->next_bit + high));
new_type.m_value |= (ValueType{1} << (m_next_bit + high));
new_type.next_bit += BitsForDeriveDense(n);
new_type.m_next_bit += BitsForDeriveDense(n);
return new_type;
}

consteval KMemoryRegionTypeValue Advance(size_t n) const {
AMS_ASSUME(!this->finalized);
AMS_ASSUME(!m_finalized);
AMS_ASSUME(this->next_bit + n <= BITSIZEOF(ValueType));
AMS_ASSUME(m_next_bit + n <= BITSIZEOF(ValueType));

KMemoryRegionTypeValue new_type = *this;
new_type.next_bit += n;
new_type.m_next_bit += n;
return new_type;
}

constexpr ALWAYS_INLINE bool IsAncestorOf(ValueType v) const {
return (this->value | v) == v;
return (m_value | v) == v;
}
};

@ -27,10 +27,10 @@ namespace ams::kern {

using List = util::IntrusiveListBaseTraits<KObjectName>::ListType;
private:
char name[NameLengthMax];
char m_name[NameLengthMax];
KAutoObject *object;
KAutoObject *m_object;
public:
constexpr KObjectName() : name(), object() { /* ... */ }
constexpr KObjectName() : m_name(), m_object() { /* ... */ }
public:
static Result NewFromName(KAutoObject *obj, const char *name);
static Result Delete(KAutoObject *obj, const char *name);

@ -60,7 +60,7 @@ namespace ams::kern {
void Initialize(KAutoObject *obj, const char *name);

bool MatchesName(const char *name) const;
KAutoObject *GetObject() const { return this->object; }
KAutoObject *GetObject() const { return m_object; }
};

}

@ -23,28 +23,28 @@ namespace ams::kern {
private:
class RandomBitGenerator {
private:
util::TinyMT rng;
util::TinyMT m_rng;
u32 entropy;
u32 m_entropy;
u32 bits_available;
u32 m_bits_available;
private:
void RefreshEntropy() {
this->entropy = rng.GenerateRandomU32();
m_entropy = m_rng.GenerateRandomU32();
this->bits_available = BITSIZEOF(this->entropy);
m_bits_available = BITSIZEOF(m_entropy);
}

bool GenerateRandomBit() {
if (this->bits_available == 0) {
if (m_bits_available == 0) {
this->RefreshEntropy();
}

const bool rnd_bit = (this->entropy & 1) != 0;
const bool rnd_bit = (m_entropy & 1) != 0;
this->entropy >>= 1;
m_entropy >>= 1;
--this->bits_available;
--m_bits_available;
return rnd_bit;
}
public:
RandomBitGenerator() : rng(), entropy(), bits_available() {
RandomBitGenerator() : m_rng(), m_entropy(), m_bits_available() {
this->rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
}

size_t SelectRandomBit(u64 bitmap) {

@ -89,27 +89,27 @@ namespace ams::kern {
public:
static constexpr size_t MaxDepth = 4;
private:
u64 *bit_storages[MaxDepth];
u64 *m_bit_storages[MaxDepth];
RandomBitGenerator rng;
RandomBitGenerator m_rng;
size_t num_bits;
size_t m_num_bits;
size_t used_depths;
size_t m_used_depths;
public:
KPageBitmap() : bit_storages(), rng(), num_bits(), used_depths() { /* ... */ }
KPageBitmap() : m_bit_storages(), m_rng(), m_num_bits(), m_used_depths() { /* ... */ }

constexpr size_t GetNumBits() const { return this->num_bits; }
constexpr size_t GetNumBits() const { return m_num_bits; }
constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(this->used_depths) - 1; }
constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(m_used_depths) - 1; }

u64 *Initialize(u64 *storage, size_t size) {
/* Initially, everything is un-set. */
this->num_bits = 0;
m_num_bits = 0;

/* Calculate the needed bitmap depth. */
this->used_depths = static_cast<size_t>(GetRequiredDepth(size));
m_used_depths = static_cast<size_t>(GetRequiredDepth(size));
MESOSPHERE_ASSERT(this->used_depths <= MaxDepth);
MESOSPHERE_ASSERT(m_used_depths <= MaxDepth);

/* Set the bitmap pointers. */
for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
this->bit_storages[depth] = storage;
m_bit_storages[depth] = storage;
size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
storage += size;
}

@ -123,18 +123,18 @@ namespace ams::kern {

if (random) {
do {
const u64 v = this->bit_storages[depth][offset];
const u64 v = m_bit_storages[depth][offset];
if (v == 0) {
/* If depth is bigger than zero, then a previous level indicated a block was free. */
MESOSPHERE_ASSERT(depth == 0);
return -1;
}
offset = offset * BITSIZEOF(u64) + this->rng.SelectRandomBit(v);
offset = offset * BITSIZEOF(u64) + m_rng.SelectRandomBit(v);
++depth;
} while (depth < static_cast<s32>(this->used_depths));
} while (depth < static_cast<s32>(m_used_depths));
} else {
do {
const u64 v = this->bit_storages[depth][offset];
const u64 v = m_bit_storages[depth][offset];
if (v == 0) {
/* If depth is bigger than zero, then a previous level indicated a block was free. */
MESOSPHERE_ASSERT(depth == 0);

@ -142,7 +142,7 @@ namespace ams::kern {
}
offset = offset * BITSIZEOF(u64) + __builtin_ctzll(v);
++depth;
} while (depth < static_cast<s32>(this->used_depths));
} while (depth < static_cast<s32>(m_used_depths));
}

return static_cast<ssize_t>(offset);

@ -150,17 +150,17 @@ namespace ams::kern {

void SetBit(size_t offset) {
this->SetBit(this->GetHighestDepthIndex(), offset);
this->num_bits++;
m_num_bits++;
}

void ClearBit(size_t offset) {
this->ClearBit(this->GetHighestDepthIndex(), offset);
this->num_bits--;
m_num_bits--;
}

bool ClearRange(size_t offset, size_t count) {
s32 depth = this->GetHighestDepthIndex();
u64 *bits = this->bit_storages[depth];
u64 *bits = m_bit_storages[depth];
size_t bit_ind = offset / BITSIZEOF(u64);
if (AMS_LIKELY(count < BITSIZEOF(u64))) {
const size_t shift = offset % BITSIZEOF(u64);

@ -202,7 +202,7 @@ namespace ams::kern {
} while (remaining > 0);
}

this->num_bits -= count;
m_num_bits -= count;
return true;
}
private:

@ -212,7 +212,7 @@ namespace ams::kern {
size_t which = offset % BITSIZEOF(u64);
const u64 mask = u64(1) << which;

u64 *bit = std::addressof(this->bit_storages[depth][ind]);
u64 *bit = std::addressof(m_bit_storages[depth][ind]);
u64 v = *bit;
MESOSPHERE_ASSERT((v & mask) == 0);
*bit = v | mask;

@ -230,7 +230,7 @@ namespace ams::kern {
size_t which = offset % BITSIZEOF(u64);
const u64 mask = u64(1) << which;

u64 *bit = std::addressof(this->bit_storages[depth][ind]);
u64 *bit = std::addressof(m_bit_storages[depth][ind]);
u64 v = *bit;
MESOSPHERE_ASSERT((v & mask) != 0);
v &= ~mask;

@ -21,10 +21,10 @@ namespace ams::kern {

class KPageBuffer : public KSlabAllocated<KPageBuffer> {
private:
alignas(PageSize) u8 buffer[PageSize];
alignas(PageSize) u8 m_buffer[PageSize];
public:
KPageBuffer() {
std::memset(buffer, 0, sizeof(buffer));
std::memset(m_buffer, 0, sizeof(m_buffer));
}

ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const {

@ -24,24 +24,24 @@ namespace ams::kern {

class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> {
private:
KVirtualAddress address;
KVirtualAddress m_address;
size_t num_pages;
size_t m_num_pages;
public:
constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), address(), num_pages() { /* ... */ }
constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), m_address(), m_num_pages() { /* ... */ }

constexpr void Initialize(KVirtualAddress addr, size_t np) {
this->address = addr;
m_address = addr;
this->num_pages = np;
m_num_pages = np;
}

constexpr KVirtualAddress GetAddress() const { return this->address; }
constexpr KVirtualAddress GetAddress() const { return m_address; }
constexpr size_t GetNumPages() const { return this->num_pages; }
constexpr size_t GetNumPages() const { return m_num_pages; }
constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; }
constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; }

constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
return this->address == rhs.address && this->num_pages == rhs.num_pages;
return m_address == rhs.m_address && m_num_pages == rhs.m_num_pages;
}

constexpr bool operator==(const KBlockInfo &rhs) const {

@ -55,7 +55,7 @@ namespace ams::kern {
constexpr bool IsStrictlyBefore(KVirtualAddress addr) const {
const KVirtualAddress end = this->GetEndAddress();

if (this->address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) {
if (m_address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) {
return false;
}

@ -68,7 +68,7 @@ namespace ams::kern {

constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) {
if (addr != Null<KVirtualAddress> && addr == this->GetEndAddress()) {
this->num_pages += np;
m_num_pages += np;
return true;
}
return false;

@ -80,17 +80,17 @@ namespace ams::kern {
using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType;
using iterator = BlockInfoList::const_iterator;
private:
BlockInfoList block_list;
BlockInfoList m_block_list;
KBlockInfoManager *manager;
KBlockInfoManager *m_manager;
public:
explicit KPageGroup(KBlockInfoManager *m) : block_list(), manager(m) { /* ... */ }
explicit KPageGroup(KBlockInfoManager *m) : m_block_list(), m_manager(m) { /* ... */ }
~KPageGroup() { this->Finalize(); }

void Finalize();

iterator begin() const { return this->block_list.begin(); }
iterator begin() const { return m_block_list.begin(); }
iterator end() const { return this->block_list.end(); }
iterator end() const { return m_block_list.end(); }
bool empty() const { return this->block_list.empty(); }
bool empty() const { return m_block_list.empty(); }

Result AddBlock(KVirtualAddress addr, size_t num_pages);
void Open() const;

@ -111,14 +111,14 @@ namespace ams::kern {

class KScopedPageGroup {
private:
const KPageGroup *group;
const KPageGroup *m_pg;
public:
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : group(gp) { if (this->group) { this->group->Open(); } }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : m_pg(gp) { if (m_pg) { m_pg->Open(); } }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
ALWAYS_INLINE ~KScopedPageGroup() { if (this->group) { this->group->Close(); } }
ALWAYS_INLINE ~KScopedPageGroup() { if (m_pg) { m_pg->Close(); } }

ALWAYS_INLINE void CancelClose() {
this->group = nullptr;
m_pg = nullptr;
}
};

@ -53,48 +53,48 @@ namespace ams::kern {
private:
class Block {
private:
KPageBitmap bitmap;
KPageBitmap m_bitmap;
KVirtualAddress heap_address;
KVirtualAddress m_heap_address;
uintptr_t end_offset;
uintptr_t m_end_offset;
size_t block_shift;
size_t m_block_shift;
size_t next_block_shift;
size_t m_next_block_shift;
public:
Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ }
Block() : m_bitmap(), m_heap_address(), m_end_offset(), m_block_shift(), m_next_block_shift() { /* ... */ }

constexpr size_t GetShift() const { return this->block_shift; }
constexpr size_t GetShift() const { return m_block_shift; }
constexpr size_t GetNextShift() const { return this->next_block_shift; }
constexpr size_t GetNextShift() const { return m_next_block_shift; }
constexpr size_t GetSize() const { return u64(1) << this->GetShift(); }
constexpr size_t GetNumPages() const { return this->GetSize() / PageSize; }
constexpr size_t GetNumFreeBlocks() const { return this->bitmap.GetNumBits(); }
constexpr size_t GetNumFreeBlocks() const { return m_bitmap.GetNumBits(); }
constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); }

u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
/* Set shifts. */
this->block_shift = bs;
m_block_shift = bs;
this->next_block_shift = nbs;
m_next_block_shift = nbs;

/* Align up the address. */
KVirtualAddress end = addr + size;
const size_t align = (this->next_block_shift != 0) ? (u64(1) << this->next_block_shift) : (u64(1) << this->block_shift);
const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) : (u64(1) << m_block_shift);
addr = util::AlignDown(GetInteger(addr), align);
end = util::AlignUp(GetInteger(end), align);

this->heap_address = addr;
m_heap_address = addr;
this->end_offset = (end - addr) / (u64(1) << this->block_shift);
m_end_offset = (end - addr) / (u64(1) << m_block_shift);
return this->bitmap.Initialize(bit_storage, this->end_offset);
return m_bitmap.Initialize(bit_storage, m_end_offset);
}

KVirtualAddress PushBlock(KVirtualAddress address) {
/* Set the bit for the free block. */
size_t offset = (address - this->heap_address) >> this->GetShift();
size_t offset = (address - m_heap_address) >> this->GetShift();
this->bitmap.SetBit(offset);
m_bitmap.SetBit(offset);

/* If we have a next shift, try to clear the blocks below this one and return the new address. */
if (this->GetNextShift()) {
const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
offset = util::AlignDown(offset, diff);
if (this->bitmap.ClearRange(offset, diff)) {
if (m_bitmap.ClearRange(offset, diff)) {
return this->heap_address + (offset << this->GetShift());
return m_heap_address + (offset << this->GetShift());
}
}

@ -104,15 +104,15 @@ namespace ams::kern {

KVirtualAddress PopBlock(bool random) {
/* Find a free block. */
ssize_t soffset = this->bitmap.FindFreeBlock(random);
ssize_t soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
return Null<KVirtualAddress>;
}
const size_t offset = static_cast<size_t>(soffset);

/* Update our tracking and return it. */
this->bitmap.ClearBit(offset);
m_bitmap.ClearBit(offset);
return this->heap_address + (offset << this->GetShift());
return m_heap_address + (offset << this->GetShift());
}
public:
static constexpr size_t CalculateManagementOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) {

@ -123,21 +123,21 @@ namespace ams::kern {
}
};
private:
KVirtualAddress heap_address;
KVirtualAddress m_heap_address;
size_t heap_size;
size_t m_heap_size;
size_t used_size;
size_t m_used_size;
size_t num_blocks;
size_t m_num_blocks;
Block blocks[NumMemoryBlockPageShifts];
Block m_blocks[NumMemoryBlockPageShifts];
private:
void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
size_t GetNumFreePages() const;

void FreeBlock(KVirtualAddress block, s32 index);
public:
KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ }
KPageHeap() : m_heap_address(), m_heap_size(), m_used_size(), m_num_blocks(), m_blocks() { /* ... */ }

constexpr KVirtualAddress GetAddress() const { return this->heap_address; }
constexpr KVirtualAddress GetAddress() const { return m_heap_address; }
constexpr size_t GetSize() const { return this->heap_size; }
constexpr size_t GetSize() const { return m_heap_size; }
constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; }
constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; }

@ -150,7 +150,7 @@ namespace ams::kern {
void DumpFreeList() const;

void UpdateUsedSize() {
this->used_size = this->heap_size - (this->GetNumFreePages() * PageSize);
m_used_size = m_heap_size - (this->GetNumFreePages() * PageSize);
}

KVirtualAddress AllocateBlock(s32 index, bool random);

@ -76,30 +76,30 @@ namespace ams::kern {
|
||||||
struct PageLinkedList {
|
struct PageLinkedList {
|
||||||
private:
|
private:
|
||||||
struct Node {
|
struct Node {
|
||||||
Node *next;
|
Node *m_next;
|
||||||
u8 buffer[PageSize - sizeof(Node *)];
|
u8 m_buffer[PageSize - sizeof(Node *)];
|
||||||
};
|
};
|
||||||
static_assert(util::is_pod<Node>::value);
|
static_assert(util::is_pod<Node>::value);
|
||||||
private:
|
private:
|
||||||
Node *root;
|
Node *m_root;
|
||||||
public:
|
public:
|
||||||
constexpr PageLinkedList() : root(nullptr) { /* ... */ }
|
constexpr PageLinkedList() : m_root(nullptr) { /* ... */ }
|
||||||
|
|
||||||
void Push(Node *n) {
|
void Push(Node *n) {
|
||||||
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
|
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
|
||||||
n->next = this->root;
|
n->m_next = m_root;
|
||||||
this->root = n;
|
m_root = n;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Push(KVirtualAddress addr) {
|
void Push(KVirtualAddress addr) {
|
||||||
this->Push(GetPointer<Node>(addr));
|
this->Push(GetPointer<Node>(addr));
|
||||||
}
|
}
|
||||||
|
|
||||||
Node *Peek() const { return this->root; }
|
Node *Peek() const { return m_root; }
|
||||||
|
|
||||||
Node *Pop() {
|
Node *Pop() {
|
||||||
Node *r = this->root;
|
Node *r = m_root;
|
||||||
this->root = this->root->next;
|
m_root = m_root->m_next;
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -122,82 +122,72 @@ namespace ams::kern {
|
||||||
private:
|
private:
|
||||||
class KScopedPageTableUpdater {
|
class KScopedPageTableUpdater {
|
||||||
private:
|
private:
|
||||||
KPageTableBase *page_table;
|
KPageTableBase *m_pt;
|
||||||
PageLinkedList ll;
|
PageLinkedList m_ll;
|
||||||
public:
|
public:
|
||||||
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : page_table(pt), ll() { /* ... */ }
|
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : m_pt(pt), m_ll() { /* ... */ }
|
||||||
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase &pt) : KScopedPageTableUpdater(std::addressof(pt)) { /* ... */ }
|
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase &pt) : KScopedPageTableUpdater(std::addressof(pt)) { /* ... */ }
|
||||||
ALWAYS_INLINE ~KScopedPageTableUpdater() { this->page_table->FinalizeUpdate(this->GetPageList()); }
|
ALWAYS_INLINE ~KScopedPageTableUpdater() { m_pt->FinalizeUpdate(this->GetPageList()); }
|
||||||
|
|
||||||
PageLinkedList *GetPageList() { return std::addressof(this->ll); }
|
PageLinkedList *GetPageList() { return std::addressof(m_ll); }
|
||||||
};
|
};
|
||||||
private:
|
private:
|
||||||
KProcessAddress address_space_start;
|
KProcessAddress m_address_space_start{};
|
||||||
KProcessAddress address_space_end;
|
KProcessAddress m_address_space_end{};
|
||||||
KProcessAddress heap_region_start;
|
KProcessAddress m_heap_region_start{};
|
||||||
KProcessAddress heap_region_end;
|
KProcessAddress m_heap_region_end{};
|
||||||
KProcessAddress current_heap_end;
|
KProcessAddress m_current_heap_end{};
|
||||||
KProcessAddress alias_region_start;
|
KProcessAddress m_alias_region_start{};
|
||||||
KProcessAddress alias_region_end;
|
KProcessAddress m_alias_region_end{};
|
||||||
KProcessAddress stack_region_start;
|
KProcessAddress m_stack_region_start{};
|
||||||
KProcessAddress stack_region_end;
|
KProcessAddress m_stack_region_end{};
|
||||||
KProcessAddress kernel_map_region_start;
|
KProcessAddress m_kernel_map_region_start{};
|
||||||
KProcessAddress kernel_map_region_end;
|
KProcessAddress m_kernel_map_region_end{};
|
||||||
KProcessAddress alias_code_region_start;
|
KProcessAddress m_alias_code_region_start{};
|
||||||
KProcessAddress alias_code_region_end;
|
KProcessAddress m_alias_code_region_end{};
|
||||||
KProcessAddress code_region_start;
|
KProcessAddress m_code_region_start{};
|
||||||
KProcessAddress code_region_end;
|
KProcessAddress m_code_region_end{};
|
||||||
size_t max_heap_size;
|
size_t m_max_heap_size{};
|
||||||
size_t mapped_physical_memory_size;
|
size_t m_mapped_physical_memory_size{};
|
||||||
size_t mapped_unsafe_physical_memory;
|
size_t m_mapped_unsafe_physical_memory{};
|
||||||
mutable KLightLock general_lock;
|
mutable KLightLock m_general_lock{};
|
||||||
mutable KLightLock map_physical_memory_lock;
|
mutable KLightLock m_map_physical_memory_lock{};
|
||||||
KPageTableImpl impl;
|
KPageTableImpl m_impl{};
|
||||||
KMemoryBlockManager memory_block_manager;
|
KMemoryBlockManager m_memory_block_manager{};
|
||||||
u32 allocate_option;
|
u32 m_allocate_option{};
|
||||||
u32 address_space_width;
|
u32 m_address_space_width{};
|
||||||
bool is_kernel;
|
bool m_is_kernel{};
|
||||||
bool enable_aslr;
|
bool m_enable_aslr{};
|
||||||
bool enable_device_address_space_merge;
|
bool m_enable_device_address_space_merge{};
|
||||||
KMemoryBlockSlabManager *memory_block_slab_manager;
|
KMemoryBlockSlabManager *m_memory_block_slab_manager{};
|
||||||
KBlockInfoManager *block_info_manager;
|
KBlockInfoManager *m_block_info_manager{};
|
||||||
const KMemoryRegion *cached_physical_linear_region;
|
const KMemoryRegion *m_cached_physical_linear_region{};
|
||||||
const KMemoryRegion *cached_physical_heap_region;
|
const KMemoryRegion *m_cached_physical_heap_region{};
|
||||||
const KMemoryRegion *cached_virtual_heap_region;
|
const KMemoryRegion *m_cached_virtual_heap_region{};
|
||||||
MemoryFillValue heap_fill_value;
|
MemoryFillValue m_heap_fill_value{};
|
||||||
MemoryFillValue ipc_fill_value;
|
MemoryFillValue m_ipc_fill_value{};
|
||||||
MemoryFillValue stack_fill_value;
|
MemoryFillValue m_stack_fill_value{};
|
||||||
public:
|
public:
|
||||||
constexpr KPageTableBase() :
|
constexpr KPageTableBase() { /* ... */ }
|
||||||
address_space_start(), address_space_end(), heap_region_start(), heap_region_end(), current_heap_end(),
|
|
||||||
alias_region_start(), alias_region_end(), stack_region_start(), stack_region_end(), kernel_map_region_start(),
|
|
||||||
kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(),
|
|
||||||
max_heap_size(), mapped_physical_memory_size(), mapped_unsafe_physical_memory(), general_lock(), map_physical_memory_lock(),
|
|
||||||
impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), enable_device_address_space_merge(),
|
|
||||||
memory_block_slab_manager(), block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
|
|
||||||
heap_fill_value(), ipc_fill_value(), stack_fill_value()
|
|
||||||
{
|
|
||||||
/* ... */
|
|
||||||
}
|
|
||||||
|
|
||||||
NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
|
NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
|
||||||
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager);
|
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager);
|
||||||
|
|
||||||
void Finalize();
|
void Finalize();
|
||||||
|
|
||||||
constexpr bool IsKernel() const { return this->is_kernel; }
|
constexpr bool IsKernel() const { return m_is_kernel; }
|
||||||
constexpr bool IsAslrEnabled() const { return this->enable_aslr; }
|
constexpr bool IsAslrEnabled() const { return m_enable_aslr; }
|
||||||
|
|
||||||
constexpr bool Contains(KProcessAddress addr) const {
|
constexpr bool Contains(KProcessAddress addr) const {
|
||||||
return this->address_space_start <= addr && addr <= this->address_space_end - 1;
|
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr bool Contains(KProcessAddress addr, size_t size) const {
|
constexpr bool Contains(KProcessAddress addr, size_t size) const {
|
||||||
return this->address_space_start <= addr && addr < addr + size && addr + size - 1 <= this->address_space_end - 1;
|
return m_address_space_start <= addr && addr < addr + size && addr + size - 1 <= m_address_space_end - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
|
constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
|
||||||
return this->Contains(addr, size) && this->alias_region_start <= addr && addr + size - 1 <= this->alias_region_end - 1;
|
return this->Contains(addr, size) && m_alias_region_start <= addr && addr + size - 1 <= m_alias_region_end - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
|
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
|
||||||
|
@ -213,55 +203,55 @@ namespace ams::kern {
|
||||||
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
|
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
|
||||||
virtual void FinalizeUpdate(PageLinkedList *page_list) = 0;
|
virtual void FinalizeUpdate(PageLinkedList *page_list) = 0;
|
||||||
|
|
||||||
KPageTableImpl &GetImpl() { return this->impl; }
|
KPageTableImpl &GetImpl() { return m_impl; }
|
||||||
const KPageTableImpl &GetImpl() const { return this->impl; }
|
const KPageTableImpl &GetImpl() const { return m_impl; }
|
||||||
|
|
||||||
bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }
|
bool IsLockedByCurrentThread() const { return m_general_lock.IsLockedByCurrentThread(); }
|
||||||
|
|
||||||
bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
|
bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
|
||||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
return KMemoryLayout::IsLinearMappedPhysicalAddress(this->cached_physical_linear_region, phys_addr);
|
return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
|
bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
|
||||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
return KMemoryLayout::IsLinearMappedPhysicalAddress(this->cached_physical_linear_region, phys_addr, size);
|
return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
|
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
|
||||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
return KMemoryLayout::IsHeapPhysicalAddress(this->cached_physical_heap_region, phys_addr);
|
return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
|
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
|
||||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
return KMemoryLayout::IsHeapPhysicalAddress(this->cached_physical_heap_region, phys_addr, size);
|
return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
|
bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
|
||||||
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
|
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
return KMemoryLayout::IsHeapPhysicalAddress(this->cached_physical_heap_region, phys_addr);
|
return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
|
bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
|
||||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
return KMemoryLayout::IsHeapVirtualAddress(this->cached_virtual_heap_region, virt_addr);
|
return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
|
bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
|
||||||
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
||||||
|
|
||||||
return KMemoryLayout::IsHeapVirtualAddress(this->cached_virtual_heap_region, virt_addr, size);
|
return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
|
bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
|
||||||
return (this->address_space_start <= addr) && (num_pages <= (this->address_space_end - this->address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= this->address_space_end - 1);
|
return (m_address_space_start <= addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
|
||||||
}
|
}
|
||||||
private:
|
private:
|
||||||
constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 1 : 4; }
|
constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 1 : 4; }
|
||||||
|
@ -308,7 +298,7 @@ namespace ams::kern {
|
||||||
return this->GetImpl().GetPhysicalAddress(out, virt_addr);
|
return this->GetImpl().GetPhysicalAddress(out, virt_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
KBlockInfoManager *GetBlockInfoManager() const { return this->block_info_manager; }
|
KBlockInfoManager *GetBlockInfoManager() const { return m_block_info_manager; }
|
||||||
|
|
||||||
Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
|
Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
|
||||||
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
|
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
|
||||||
|
@@ -386,43 +376,43 @@ namespace ams::kern {

         void DumpMemoryBlocksLocked() const {
             MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-            this->memory_block_manager.DumpBlocks();
+            m_memory_block_manager.DumpBlocks();
         }

         void DumpMemoryBlocks() const {
-            KScopedLightLock lk(this->general_lock);
+            KScopedLightLock lk(m_general_lock);
             this->DumpMemoryBlocksLocked();
         }

         void DumpPageTable() const {
-            KScopedLightLock lk(this->general_lock);
+            KScopedLightLock lk(m_general_lock);
-            this->GetImpl().Dump(GetInteger(this->address_space_start), this->address_space_end - this->address_space_start);
+            this->GetImpl().Dump(GetInteger(m_address_space_start), m_address_space_end - m_address_space_start);
         }

         size_t CountPageTables() const {
-            KScopedLightLock lk(this->general_lock);
+            KScopedLightLock lk(m_general_lock);
             return this->GetImpl().CountPageTables();
         }
     public:
-        KProcessAddress GetAddressSpaceStart() const { return this->address_space_start; }
+        KProcessAddress GetAddressSpaceStart() const { return m_address_space_start; }
-        KProcessAddress GetHeapRegionStart() const { return this->heap_region_start; }
+        KProcessAddress GetHeapRegionStart() const { return m_heap_region_start; }
-        KProcessAddress GetAliasRegionStart() const { return this->alias_region_start; }
+        KProcessAddress GetAliasRegionStart() const { return m_alias_region_start; }
-        KProcessAddress GetStackRegionStart() const { return this->stack_region_start; }
+        KProcessAddress GetStackRegionStart() const { return m_stack_region_start; }
-        KProcessAddress GetKernelMapRegionStart() const { return this->kernel_map_region_start; }
+        KProcessAddress GetKernelMapRegionStart() const { return m_kernel_map_region_start; }
-        KProcessAddress GetAliasCodeRegionStart() const { return this->alias_code_region_start; }
+        KProcessAddress GetAliasCodeRegionStart() const { return m_alias_code_region_start; }

-        size_t GetAddressSpaceSize() const { return this->address_space_end - this->address_space_start; }
+        size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; }
-        size_t GetHeapRegionSize() const { return this->heap_region_end - this->heap_region_start; }
+        size_t GetHeapRegionSize() const { return m_heap_region_end - m_heap_region_start; }
-        size_t GetAliasRegionSize() const { return this->alias_region_end - this->alias_region_start; }
+        size_t GetAliasRegionSize() const { return m_alias_region_end - m_alias_region_start; }
-        size_t GetStackRegionSize() const { return this->stack_region_end - this->stack_region_start; }
+        size_t GetStackRegionSize() const { return m_stack_region_end - m_stack_region_start; }
-        size_t GetKernelMapRegionSize() const { return this->kernel_map_region_end - this->kernel_map_region_start; }
+        size_t GetKernelMapRegionSize() const { return m_kernel_map_region_end - m_kernel_map_region_start; }
-        size_t GetAliasCodeRegionSize() const { return this->alias_code_region_end - this->alias_code_region_start; }
+        size_t GetAliasCodeRegionSize() const { return m_alias_code_region_end - m_alias_code_region_start; }

         size_t GetNormalMemorySize() const {
             /* Lock the table. */
-            KScopedLightLock lk(this->general_lock);
+            KScopedLightLock lk(m_general_lock);

-            return (this->current_heap_end - this->heap_region_start) + this->mapped_physical_memory_size;
+            return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
         }

         size_t GetCodeSize() const;
@@ -430,7 +420,7 @@ namespace ams::kern {
         size_t GetAliasCodeSize() const;
         size_t GetAliasCodeDataSize() const;

-        u32 GetAllocateOption() const { return this->allocate_option; }
+        u32 GetAllocateOption() const { return m_allocate_option; }
     public:
         static ALWAYS_INLINE KVirtualAddress GetLinearMappedVirtualAddress(KPhysicalAddress addr) {
             return KMemoryLayout::GetLinearVirtualAddress(addr);
@@ -24,7 +24,7 @@ namespace ams::kern {

     class PageTablePage {
         private:
-            u8 buffer[PageSize];
+            u8 m_buffer[PageSize];
     };
     static_assert(sizeof(PageTablePage) == PageSize);
@@ -38,23 +38,23 @@ namespace ams::kern {
         private:
             using BaseHeap = KDynamicSlabHeap<impl::PageTablePage>;
         private:
-            RefCount *ref_counts;
+            RefCount *m_ref_counts;
         public:
             static constexpr size_t CalculateReferenceCountSize(size_t size) {
                 return (size / PageSize) * sizeof(RefCount);
             }
         public:
-            constexpr KPageTableManager() : BaseHeap(), ref_counts() { /* ... */ }
+            constexpr KPageTableManager() : BaseHeap(), m_ref_counts() { /* ... */ }
         private:
             void Initialize(RefCount *rc) {
-                this->ref_counts = rc;
+                m_ref_counts = rc;
                 for (size_t i = 0; i < this->GetSize() / PageSize; i++) {
-                    this->ref_counts[i] = 0;
+                    m_ref_counts[i] = 0;
                 }
             }

             constexpr RefCount *GetRefCountPointer(KVirtualAddress addr) const {
-                return std::addressof(this->ref_counts[(addr - this->GetAddress()) / PageSize]);
+                return std::addressof(m_ref_counts[(addr - this->GetAddress()) / PageSize]);
             }
         public:
             void Initialize(KDynamicPageManager *page_allocator, RefCount *rc) {
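CalculateReferenceCountSize above reserves one RefCount per page managed by the heap, so a manager covering N bytes needs (N / PageSize) * sizeof(RefCount) bytes of side storage, and GetRefCountPointer indexes that array by page offset. A toy model of the arithmetic, with an assumed 16-bit RefCount (the real type is defined by the surrounding headers):

    #include <cstddef>
    #include <cstdint>

    using RefCount = std::uint16_t;        /* assumed width, illustration only */
    constexpr std::size_t PageSize = 0x1000;

    constexpr std::size_t CalculateReferenceCountSize(std::size_t size) {
        return (size / PageSize) * sizeof(RefCount);  /* one counter per page */
    }

    /* A 2 MiB page-table heap needs 512 counters worth of storage. */
    static_assert(CalculateReferenceCountSize(2 * 1024 * 1024) == 512 * sizeof(RefCount));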
@@ -35,13 +35,13 @@ namespace ams::kern {
                 ServerClosed = 3,
             };
         private:
-            KServerPort server;
+            KServerPort m_server;
-            KClientPort client;
+            KClientPort m_client;
-            uintptr_t name;
+            uintptr_t m_name;
-            State state;
+            State m_state;
-            bool is_light;
+            bool m_is_light;
         public:
-            constexpr KPort() : server(), client(), name(), state(State::Invalid), is_light() { /* ... */ }
+            constexpr KPort() : m_server(), m_client(), m_name(), m_state(State::Invalid), m_is_light() { /* ... */ }
             virtual ~KPort() { /* ... */ }

             static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
@@ -50,16 +50,16 @@ namespace ams::kern {
             void OnClientClosed();
             void OnServerClosed();

-            uintptr_t GetName() const { return this->name; }
+            uintptr_t GetName() const { return m_name; }
-            bool IsLight() const { return this->is_light; }
+            bool IsLight() const { return m_is_light; }

             Result EnqueueSession(KServerSession *session);
             Result EnqueueSession(KLightServerSession *session);

-            KClientPort &GetClientPort() { return this->client; }
+            KClientPort &GetClientPort() { return m_client; }
-            KServerPort &GetServerPort() { return this->server; }
+            KServerPort &GetServerPort() { return m_server; }
-            const KClientPort &GetClientPort() const { return this->client; }
+            const KClientPort &GetClientPort() const { return m_client; }
-            const KServerPort &GetServerPort() const { return this->server; }
+            const KServerPort &GetServerPort() const { return m_server; }
     };

 }
@@ -68,11 +68,11 @@ namespace ams::kern {
         public:
             class KPerCoreQueue {
                 private:
-                    Entry root[NumCores];
+                    Entry m_root[NumCores];
                 public:
-                    constexpr ALWAYS_INLINE KPerCoreQueue() : root() {
+                    constexpr ALWAYS_INLINE KPerCoreQueue() : m_root() {
                         for (size_t i = 0; i < NumCores; i++) {
-                            this->root[i].Initialize();
+                            m_root[i].Initialize();
                         }
                     }
@@ -81,14 +81,14 @@ namespace ams::kern {
                         Entry &member_entry = member->GetPriorityQueueEntry(core);

                         /* Get the entry associated with the end of the queue. */
-                        Member *tail = this->root[core].GetPrev();
+                        Member *tail = m_root[core].GetPrev();
-                        Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
+                        Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core];

                         /* Link the entries. */
                         member_entry.SetPrev(tail);
                         member_entry.SetNext(nullptr);
                         tail_entry.SetNext(member);
-                        this->root[core].SetPrev(member);
+                        m_root[core].SetPrev(member);

                         return (tail == nullptr);
                     }
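PushBack above is the classic intrusive append with a root sentinel: root's prev pointer is the tail, root's next pointer is the head, a null tail means the queue was empty, and the boolean return reports the empty-to-non-empty transition the caller uses to set the priority bit. A freestanding sketch of the same linking, with a simplified Node standing in for the Member/Entry pair:

    /* Simplified stand-in for the Member/Entry pair used above. */
    struct Node {
        Node *prev = nullptr;
        Node *next = nullptr;
    };

    struct Queue {
        Node root{};  /* sentinel: root.next = head, root.prev = tail, null when empty */

        /* Returns true iff the queue went from empty to non-empty, which is
         * exactly the condition the caller feeds into SetBit(priority). */
        bool PushBack(Node *n) {
            Node *tail = root.prev;
            Node &tail_entry = (tail != nullptr) ? *tail : root;

            n->prev = tail;            /* new tail points back at old tail (or null) */
            n->next = nullptr;
            tail_entry.next = n;       /* old tail (or root) points forward at n */
            root.prev = n;             /* root records the new tail */

            return tail == nullptr;
        }
    };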
@@ -98,14 +98,14 @@ namespace ams::kern {
                         Entry &member_entry = member->GetPriorityQueueEntry(core);

                         /* Get the entry associated with the front of the queue. */
-                        Member *head = this->root[core].GetNext();
+                        Member *head = m_root[core].GetNext();
-                        Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
+                        Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core];

                         /* Link the entries. */
                         member_entry.SetPrev(nullptr);
                         member_entry.SetNext(head);
                         head_entry.SetPrev(member);
-                        this->root[core].SetNext(member);
+                        m_root[core].SetNext(member);

                         return (head == nullptr);
                     }
@@ -117,8 +117,8 @@ namespace ams::kern {
                         /* Get the entries associated with next and prev. */
                         Member *prev = member_entry.GetPrev();
                         Member *next = member_entry.GetNext();
-                        Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
+                        Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core];
-                        Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
+                        Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core];

                         /* Unlink. */
                         prev_entry.SetNext(next);
@@ -128,24 +128,24 @@ namespace ams::kern {
                     }

                     constexpr ALWAYS_INLINE Member *GetFront(s32 core) const {
-                        return this->root[core].GetNext();
+                        return m_root[core].GetNext();
                     }
             };

             class KPriorityQueueImpl {
                 private:
-                    KPerCoreQueue queues[NumPriority];
+                    KPerCoreQueue m_queues[NumPriority];
-                    util::BitSet64<NumPriority> available_priorities[NumCores];
+                    util::BitSet64<NumPriority> m_available_priorities[NumCores];
                 public:
-                    constexpr ALWAYS_INLINE KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ }
+                    constexpr ALWAYS_INLINE KPriorityQueueImpl() : m_queues(), m_available_priorities() { /* ... */ }

                     constexpr ALWAYS_INLINE void PushBack(s32 priority, s32 core, Member *member) {
                         MESOSPHERE_ASSERT(IsValidCore(core));
                         MESOSPHERE_ASSERT(IsValidPriority(priority));

                         if (AMS_LIKELY(priority <= LowestPriority)) {
-                            if (this->queues[priority].PushBack(core, member)) {
+                            if (m_queues[priority].PushBack(core, member)) {
-                                this->available_priorities[core].SetBit(priority);
+                                m_available_priorities[core].SetBit(priority);
                             }
                         }
                     }
@@ -155,8 +155,8 @@ namespace ams::kern {
                         MESOSPHERE_ASSERT(IsValidPriority(priority));

                         if (AMS_LIKELY(priority <= LowestPriority)) {
-                            if (this->queues[priority].PushFront(core, member)) {
+                            if (m_queues[priority].PushFront(core, member)) {
-                                this->available_priorities[core].SetBit(priority);
+                                m_available_priorities[core].SetBit(priority);
                             }
                         }
                     }
@@ -166,8 +166,8 @@ namespace ams::kern {
                         MESOSPHERE_ASSERT(IsValidPriority(priority));

                         if (AMS_LIKELY(priority <= LowestPriority)) {
-                            if (this->queues[priority].Remove(core, member)) {
+                            if (m_queues[priority].Remove(core, member)) {
-                                this->available_priorities[core].ClearBit(priority);
+                                m_available_priorities[core].ClearBit(priority);
                             }
                         }
                     }
@@ -175,9 +175,9 @@ namespace ams::kern {
                     constexpr ALWAYS_INLINE Member *GetFront(s32 core) const {
                         MESOSPHERE_ASSERT(IsValidCore(core));

-                        const s32 priority = this->available_priorities[core].CountLeadingZero();
+                        const s32 priority = m_available_priorities[core].CountLeadingZero();
                         if (AMS_LIKELY(priority <= LowestPriority)) {
-                            return this->queues[priority].GetFront(core);
+                            return m_queues[priority].GetFront(core);
                         } else {
                             return nullptr;
                         }
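That GetFront is O(1): each core keeps a bitmap with one bit per priority level, set exactly when the matching queue is non-empty, and CountLeadingZero returns the smallest set index, which is the highest priority (numerically smaller is higher). When no bit is set the count runs past LowestPriority and the caller returns null. A sketch of one possible encoding on a raw u64, with priority p stored at bit 63 - p so that a leading-zero count yields p directly (util::BitSet64 encapsulates its own layout, so this layout is an assumption for illustration):

    #include <cstdint>

    constexpr int NumPriority    = 64;
    constexpr int LowestPriority = 63;

    struct PriorityBitmap {
        std::uint64_t bits = 0;

        void Set(int p)   { bits |=  (std::uint64_t(1) << (63 - p)); }
        void Clear(int p) { bits &= ~(std::uint64_t(1) << (63 - p)); }

        /* Smallest set priority, or NumPriority when empty (which then
         * fails the `priority <= LowestPriority` check, as above). */
        int HighestSet() const {
            return bits ? __builtin_clzll(bits) : NumPriority;  /* GCC/Clang builtin */
        }
    };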
@@ -188,7 +188,7 @@ namespace ams::kern {
                         MESOSPHERE_ASSERT(IsValidPriority(priority));

                         if (AMS_LIKELY(priority <= LowestPriority)) {
-                            return this->queues[priority].GetFront(core);
+                            return m_queues[priority].GetFront(core);
                         } else {
                             return nullptr;
                         }
@@ -199,9 +199,9 @@ namespace ams::kern {

                         Member *next = member->GetPriorityQueueEntry(core).GetNext();
                         if (next == nullptr) {
-                            const s32 priority = this->available_priorities[core].GetNextSet(member->GetPriority());
+                            const s32 priority = m_available_priorities[core].GetNextSet(member->GetPriority());
                             if (AMS_LIKELY(priority <= LowestPriority)) {
-                                next = this->queues[priority].GetFront(core);
+                                next = m_queues[priority].GetFront(core);
                             }
                         }
                         return next;
@@ -212,8 +212,8 @@ namespace ams::kern {
                         MESOSPHERE_ASSERT(IsValidPriority(priority));

                         if (AMS_LIKELY(priority <= LowestPriority)) {
-                            this->queues[priority].Remove(core, member);
+                            m_queues[priority].Remove(core, member);
-                            this->queues[priority].PushFront(core, member);
+                            m_queues[priority].PushFront(core, member);
                         }
                     }
@@ -222,17 +222,17 @@ namespace ams::kern {
                         MESOSPHERE_ASSERT(IsValidPriority(priority));

                         if (AMS_LIKELY(priority <= LowestPriority)) {
-                            this->queues[priority].Remove(core, member);
+                            m_queues[priority].Remove(core, member);
-                            this->queues[priority].PushBack(core, member);
+                            m_queues[priority].PushBack(core, member);
-                            return this->queues[priority].GetFront(core);
+                            return m_queues[priority].GetFront(core);
                         } else {
                             return nullptr;
                         }
                     }
             };
         private:
-            KPriorityQueueImpl scheduled_queue;
+            KPriorityQueueImpl m_scheduled_queue;
-            KPriorityQueueImpl suggested_queue;
+            KPriorityQueueImpl m_suggested_queue;
         private:
             constexpr ALWAYS_INLINE void ClearAffinityBit(u64 &affinity, s32 core) {
                 affinity &= ~(u64(1ul) << core);
@@ -250,13 +250,13 @@ namespace ams::kern {
                 /* Push onto the scheduled queue for its core, if we can. */
                 u64 affinity = member->GetAffinityMask().GetAffinityMask();
                 if (const s32 core = member->GetActiveCore(); core >= 0) {
-                    this->scheduled_queue.PushBack(priority, core, member);
+                    m_scheduled_queue.PushBack(priority, core, member);
                     ClearAffinityBit(affinity, core);
                 }

                 /* And suggest the thread for all other cores. */
                 while (affinity) {
-                    this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+                    m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
                 }
             }
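The while (affinity) loop above terminates because GetNextCore consumes the mask as it goes; its body is not part of this hunk, but the idiom it implements is pop-lowest-set-bit, the complement of the ClearAffinityBit helper shown earlier. A hedged sketch of that presumed shape:

    #include <cstdint>

    using u64 = std::uint64_t;
    using s32 = std::int32_t;

    inline void ClearAffinityBit(u64 &affinity, s32 core) {
        affinity &= ~(u64(1) << core);
    }

    /* Presumed shape of GetNextCore: return the lowest set bit's index and
     * clear it, so `while (affinity)` visits each permitted core once. */
    inline s32 GetNextCore(u64 &affinity) {
        const s32 core = __builtin_ctzll(affinity);  /* GCC/Clang builtin */
        ClearAffinityBit(affinity, core);
        return core;
    }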
@@ -266,14 +266,14 @@ namespace ams::kern {
                 /* Push onto the scheduled queue for its core, if we can. */
                 u64 affinity = member->GetAffinityMask().GetAffinityMask();
                 if (const s32 core = member->GetActiveCore(); core >= 0) {
-                    this->scheduled_queue.PushFront(priority, core, member);
+                    m_scheduled_queue.PushFront(priority, core, member);
                     ClearAffinityBit(affinity, core);
                 }

                 /* And suggest the thread for all other cores. */
                 /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */
                 while (affinity) {
-                    this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+                    m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
                 }
             }
@@ -283,41 +283,41 @@ namespace ams::kern {
                 /* Remove from the scheduled queue for its core. */
                 u64 affinity = member->GetAffinityMask().GetAffinityMask();
                 if (const s32 core = member->GetActiveCore(); core >= 0) {
-                    this->scheduled_queue.Remove(priority, core, member);
+                    m_scheduled_queue.Remove(priority, core, member);
                     ClearAffinityBit(affinity, core);
                 }

                 /* Remove from the suggested queue for all other cores. */
                 while (affinity) {
-                    this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
+                    m_suggested_queue.Remove(priority, GetNextCore(affinity), member);
                 }
             }
         public:
-            constexpr ALWAYS_INLINE KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ }
+            constexpr ALWAYS_INLINE KPriorityQueue() : m_scheduled_queue(), m_suggested_queue() { /* ... */ }

             /* Getters. */
             constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core) const {
-                return this->scheduled_queue.GetFront(core);
+                return m_scheduled_queue.GetFront(core);
             }

             constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core, s32 priority) const {
-                return this->scheduled_queue.GetFront(priority, core);
+                return m_scheduled_queue.GetFront(priority, core);
             }

             constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core) const {
-                return this->suggested_queue.GetFront(core);
+                return m_suggested_queue.GetFront(core);
             }

             constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core, s32 priority) const {
-                return this->suggested_queue.GetFront(priority, core);
+                return m_suggested_queue.GetFront(priority, core);
             }

             constexpr ALWAYS_INLINE Member *GetScheduledNext(s32 core, const Member *member) const {
-                return this->scheduled_queue.GetNext(core, member);
+                return m_scheduled_queue.GetNext(core, member);
             }

             constexpr ALWAYS_INLINE Member *GetSuggestedNext(s32 core, const Member *member) const {
-                return this->suggested_queue.GetNext(core, member);
+                return m_suggested_queue.GetNext(core, member);
             }

             constexpr ALWAYS_INLINE Member *GetSamePriorityNext(s32 core, const Member *member) const {
@@ -334,11 +334,11 @@ namespace ams::kern {
             }

             constexpr ALWAYS_INLINE void MoveToScheduledFront(Member *member) {
-                this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
+                m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
             }

             constexpr ALWAYS_INLINE KThread *MoveToScheduledBack(Member *member) {
-                return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
+                return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
             }

             /* First class fancy operations. */
@@ -367,9 +367,9 @@ namespace ams::kern {
                 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
                     if (prev_affinity.GetAffinity(core)) {
                         if (core == prev_core) {
-                            this->scheduled_queue.Remove(priority, core, member);
+                            m_scheduled_queue.Remove(priority, core, member);
                         } else {
-                            this->suggested_queue.Remove(priority, core, member);
+                            m_suggested_queue.Remove(priority, core, member);
                         }
                     }
                 }
@@ -378,9 +378,9 @@ namespace ams::kern {
                 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
                     if (new_affinity.GetAffinity(core)) {
                         if (core == new_core) {
-                            this->scheduled_queue.PushBack(priority, core, member);
+                            m_scheduled_queue.PushBack(priority, core, member);
                         } else {
-                            this->suggested_queue.PushBack(priority, core, member);
+                            m_suggested_queue.PushBack(priority, core, member);
                         }
                     }
                 }
@@ -395,22 +395,22 @@ namespace ams::kern {
                 if (prev_core != new_core) {
                     /* Remove from the scheduled queue for the previous core. */
                     if (prev_core >= 0) {
-                        this->scheduled_queue.Remove(priority, prev_core, member);
+                        m_scheduled_queue.Remove(priority, prev_core, member);
                     }

                     /* Remove from the suggested queue and add to the scheduled queue for the new core. */
                     if (new_core >= 0) {
-                        this->suggested_queue.Remove(priority, new_core, member);
+                        m_suggested_queue.Remove(priority, new_core, member);
                         if (to_front) {
-                            this->scheduled_queue.PushFront(priority, new_core, member);
+                            m_scheduled_queue.PushFront(priority, new_core, member);
                         } else {
-                            this->scheduled_queue.PushBack(priority, new_core, member);
+                            m_scheduled_queue.PushBack(priority, new_core, member);
                         }
                     }

                     /* Add to the suggested queue for the previous core. */
                     if (prev_core >= 0) {
-                        this->suggested_queue.PushBack(priority, prev_core, member);
+                        m_suggested_queue.PushBack(priority, prev_core, member);
                     }
                 }
             }
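Taken together, PushBack, Remove, ChangeAffinityMask, and ChangeCore above maintain one invariant: a runnable thread is in the scheduled queue of at most one core (its active core) and in the suggested queue of every other core its affinity mask permits, so each core can find both its own work and migration candidates cheaply. A toy membership tracker restating that invariant (my reading of the code above, not kernel API):

    #include <bitset>
    #include <cassert>
    #include <cstdint>

    constexpr std::size_t NumCores = 4;

    /* Tracks, for a single thread, which per-core queues it occupies. */
    struct Membership {
        std::bitset<NumCores> scheduled, suggested;

        void Set(std::uint64_t affinity, int active_core) {
            scheduled.reset();
            suggested.reset();
            for (std::size_t core = 0; core < NumCores; core++) {
                if (affinity & (std::uint64_t(1) << core)) {
                    if (static_cast<int>(core) == active_core) {
                        scheduled.set(core);   /* runnable here */
                    } else {
                        suggested.set(core);   /* migration candidate here */
                    }
                }
            }
            assert((scheduled & suggested).none());  /* never both on one core */
        }
    };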
@@ -48,79 +48,79 @@ namespace ams::kern {
                 State_DebugBreak = ams::svc::ProcessState_DebugBreak,
             };

-            using ThreadList = util::IntrusiveListMemberTraits<&KThread::process_list_node>::ListType;
+            using ThreadList = util::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;

             static constexpr size_t AslrAlignment = KernelAslrAlignment;
         private:
             using SharedMemoryInfoList = util::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
-            using BetaList = util::IntrusiveListMemberTraits<&KBeta::process_list_node>::ListType;
+            using BetaList = util::IntrusiveListMemberTraits<&KBeta::m_process_list_node>::ListType;
             using TLPTree = util::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
             using TLPIterator = TLPTree::iterator;
         private:
-            KProcessPageTable page_table{};
+            KProcessPageTable m_page_table{};
-            std::atomic<size_t> used_kernel_memory_size{};
+            std::atomic<size_t> m_used_kernel_memory_size{};
-            TLPTree fully_used_tlp_tree{};
+            TLPTree m_fully_used_tlp_tree{};
-            TLPTree partially_used_tlp_tree{};
+            TLPTree m_partially_used_tlp_tree{};
-            s32 ideal_core_id{};
+            s32 m_ideal_core_id{};
-            void *attached_object{};
+            void *m_attached_object{};
-            KResourceLimit *resource_limit{};
+            KResourceLimit *m_resource_limit{};
-            KVirtualAddress system_resource_address{};
+            KVirtualAddress m_system_resource_address{};
-            size_t system_resource_num_pages{};
+            size_t m_system_resource_num_pages{};
-            size_t memory_release_hint{};
+            size_t m_memory_release_hint{};
-            State state{};
+            State m_state{};
-            KLightLock state_lock{};
+            KLightLock m_state_lock{};
-            KLightLock list_lock{};
+            KLightLock m_list_lock{};
-            KConditionVariable cond_var{};
+            KConditionVariable m_cond_var{};
-            KAddressArbiter address_arbiter{};
+            KAddressArbiter m_address_arbiter{};
-            u64 entropy[4]{};
+            u64 m_entropy[4]{};
-            bool is_signaled{};
+            bool m_is_signaled{};
-            bool is_initialized{};
+            bool m_is_initialized{};
-            bool is_application{};
+            bool m_is_application{};
-            char name[13]{};
+            char m_name[13]{};
-            std::atomic<u16> num_threads{};
+            std::atomic<u16> m_num_threads{};
-            u16 peak_num_threads{};
+            u16 m_peak_num_threads{};
-            u32 flags{};
+            u32 m_flags{};
-            KMemoryManager::Pool memory_pool{};
+            KMemoryManager::Pool m_memory_pool{};
-            s64 schedule_count{};
+            s64 m_schedule_count{};
-            KCapabilities capabilities{};
+            KCapabilities m_capabilities{};
-            ams::svc::ProgramId program_id{};
+            ams::svc::ProgramId m_program_id{};
-            u64 process_id{};
+            u64 m_process_id{};
-            s64 creation_time{};
+            s64 m_creation_time{};
-            KProcessAddress code_address{};
+            KProcessAddress m_code_address{};
-            size_t code_size{};
+            size_t m_code_size{};
-            size_t main_thread_stack_size{};
+            size_t m_main_thread_stack_size{};
-            size_t max_process_memory{};
+            size_t m_max_process_memory{};
-            u32 version{};
+            u32 m_version{};
-            KHandleTable handle_table{};
+            KHandleTable m_handle_table{};
-            KProcessAddress plr_address{};
+            KProcessAddress m_plr_address{};
-            void *plr_heap_address{};
+            void *m_plr_heap_address{};
-            KThread *exception_thread{};
+            KThread *m_exception_thread{};
-            ThreadList thread_list{};
+            ThreadList m_thread_list{};
-            SharedMemoryInfoList shared_memory_list{};
+            SharedMemoryInfoList m_shared_memory_list{};
-            BetaList beta_list{};
+            BetaList m_beta_list{};
-            bool is_suspended{};
+            bool m_is_suspended{};
-            bool is_jit_debug{};
+            bool m_is_jit_debug{};
-            ams::svc::DebugEvent jit_debug_event_type{};
+            ams::svc::DebugEvent m_jit_debug_event_type{};
-            ams::svc::DebugException jit_debug_exception_type{};
+            ams::svc::DebugException m_jit_debug_exception_type{};
-            uintptr_t jit_debug_params[4]{};
+            uintptr_t m_jit_debug_params[4]{};
-            u64 jit_debug_thread_id{};
+            u64 m_jit_debug_thread_id{};
-            KWaitObject wait_object{};
+            KWaitObject m_wait_object{};
-            KThread *running_threads[cpu::NumCores]{};
+            KThread *m_running_threads[cpu::NumCores]{};
-            u64 running_thread_idle_counts[cpu::NumCores]{};
+            u64 m_running_thread_idle_counts[cpu::NumCores]{};
-            KThread *pinned_threads[cpu::NumCores]{};
+            KThread *m_pinned_threads[cpu::NumCores]{};
-            std::atomic<s32> num_created_threads{};
+            std::atomic<s32> m_num_created_threads{};
-            std::atomic<s64> cpu_time{};
+            std::atomic<s64> m_cpu_time{};
-            std::atomic<s64> num_process_switches{};
+            std::atomic<s64> m_num_process_switches{};
-            std::atomic<s64> num_thread_switches{};
+            std::atomic<s64> m_num_thread_switches{};
-            std::atomic<s64> num_fpu_switches{};
+            std::atomic<s64> m_num_fpu_switches{};
-            std::atomic<s64> num_ipc_messages{};
+            std::atomic<s64> m_num_ipc_messages{};
-            std::atomic<s64> num_ipc_replies{};
+            std::atomic<s64> m_num_ipc_replies{};
-            std::atomic<s64> num_ipc_receives{};
+            std::atomic<s64> m_num_ipc_receives{};
-            std::atomic<s64> num_supervisor_calls{};
+            std::atomic<s64> m_num_supervisor_calls{};
-            KDynamicPageManager dynamic_page_manager{};
+            KDynamicPageManager m_dynamic_page_manager{};
-            KMemoryBlockSlabManager memory_block_slab_manager{};
+            KMemoryBlockSlabManager m_memory_block_slab_manager{};
-            KBlockInfoManager block_info_manager{};
+            KBlockInfoManager m_block_info_manager{};
-            KPageTableManager page_table_manager{};
+            KPageTableManager m_page_table_manager{};
         private:
             Result Initialize(const ams::svc::CreateProcessParameter &params);
@@ -130,15 +130,15 @@ namespace ams::kern {
             void PinThread(s32 core_id, KThread *thread) {
                 MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                 MESOSPHERE_ASSERT(thread != nullptr);
-                MESOSPHERE_ASSERT(this->pinned_threads[core_id] == nullptr);
+                MESOSPHERE_ASSERT(m_pinned_threads[core_id] == nullptr);
-                this->pinned_threads[core_id] = thread;
+                m_pinned_threads[core_id] = thread;
             }

             void UnpinThread(s32 core_id, KThread *thread) {
                 MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                 MESOSPHERE_ASSERT(thread != nullptr);
-                MESOSPHERE_ASSERT(this->pinned_threads[core_id] == thread);
+                MESOSPHERE_ASSERT(m_pinned_threads[core_id] == thread);
-                this->pinned_threads[core_id] = nullptr;
+                m_pinned_threads[core_id] = nullptr;
             }
         public:
             KProcess() { /* ... */ }
@@ -148,67 +148,67 @@ namespace ams::kern {
             Result Initialize(const ams::svc::CreateProcessParameter &params, svc::KUserPointer<const u32 *> caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool);
             void Exit();

-            constexpr const char *GetName() const { return this->name; }
+            constexpr const char *GetName() const { return m_name; }

-            constexpr ams::svc::ProgramId GetProgramId() const { return this->program_id; }
+            constexpr ams::svc::ProgramId GetProgramId() const { return m_program_id; }

-            constexpr u64 GetProcessId() const { return this->process_id; }
+            constexpr u64 GetProcessId() const { return m_process_id; }

-            constexpr State GetState() const { return this->state; }
+            constexpr State GetState() const { return m_state; }

-            constexpr u64 GetCoreMask() const { return this->capabilities.GetCoreMask(); }
+            constexpr u64 GetCoreMask() const { return m_capabilities.GetCoreMask(); }
-            constexpr u64 GetPriorityMask() const { return this->capabilities.GetPriorityMask(); }
+            constexpr u64 GetPriorityMask() const { return m_capabilities.GetPriorityMask(); }

-            constexpr s32 GetIdealCoreId() const { return this->ideal_core_id; }
+            constexpr s32 GetIdealCoreId() const { return m_ideal_core_id; }
-            constexpr void SetIdealCoreId(s32 core_id) { this->ideal_core_id = core_id; }
+            constexpr void SetIdealCoreId(s32 core_id) { m_ideal_core_id = core_id; }

             constexpr bool CheckThreadPriority(s32 prio) const { return ((1ul << prio) & this->GetPriorityMask()) != 0; }

-            constexpr u32 GetCreateProcessFlags() const { return this->flags; }
+            constexpr u32 GetCreateProcessFlags() const { return m_flags; }

-            constexpr bool Is64Bit() const { return this->flags & ams::svc::CreateProcessFlag_Is64Bit; }
+            constexpr bool Is64Bit() const { return m_flags & ams::svc::CreateProcessFlag_Is64Bit; }

-            constexpr KProcessAddress GetEntryPoint() const { return this->code_address; }
+            constexpr KProcessAddress GetEntryPoint() const { return m_code_address; }

-            constexpr size_t GetMainStackSize() const { return this->main_thread_stack_size; }
+            constexpr size_t GetMainStackSize() const { return m_main_thread_stack_size; }

-            constexpr KMemoryManager::Pool GetMemoryPool() const { return this->memory_pool; }
+            constexpr KMemoryManager::Pool GetMemoryPool() const { return m_memory_pool; }

-            constexpr u64 GetRandomEntropy(size_t i) const { return this->entropy[i]; }
+            constexpr u64 GetRandomEntropy(size_t i) const { return m_entropy[i]; }

-            constexpr bool IsApplication() const { return this->is_application; }
+            constexpr bool IsApplication() const { return m_is_application; }

-            constexpr bool IsSuspended() const { return this->is_suspended; }
+            constexpr bool IsSuspended() const { return m_is_suspended; }
-            constexpr void SetSuspended(bool suspended) { this->is_suspended = suspended; }
+            constexpr void SetSuspended(bool suspended) { m_is_suspended = suspended; }

             Result Terminate();

             constexpr bool IsTerminated() const {
-                return this->state == State_Terminated;
+                return m_state == State_Terminated;
             }

             constexpr bool IsAttachedToDebugger() const {
-                return this->attached_object != nullptr;
+                return m_attached_object != nullptr;
             }

             constexpr bool IsPermittedInterrupt(int32_t interrupt_id) const {
-                return this->capabilities.IsPermittedInterrupt(interrupt_id);
+                return m_capabilities.IsPermittedInterrupt(interrupt_id);
             }

             constexpr bool IsPermittedDebug() const {
-                return this->capabilities.IsPermittedDebug();
+                return m_capabilities.IsPermittedDebug();
             }

             constexpr bool CanForceDebug() const {
-                return this->capabilities.CanForceDebug();
+                return m_capabilities.CanForceDebug();
             }

-            u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); }
+            u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }

-            ThreadList &GetThreadList() { return this->thread_list; }
+            ThreadList &GetThreadList() { return m_thread_list; }
-            const ThreadList &GetThreadList() const { return this->thread_list; }
+            const ThreadList &GetThreadList() const { return m_thread_list; }

-            constexpr void *GetDebugObject() const { return this->attached_object; }
+            constexpr void *GetDebugObject() const { return m_attached_object; }
             KProcess::State SetDebugObject(void *debug_object);
             void ClearDebugObject(KProcess::State state);
@@ -223,46 +223,46 @@ namespace ams::kern {

             KThread *GetPinnedThread(s32 core_id) const {
                 MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
-                return this->pinned_threads[core_id];
+                return m_pinned_threads[core_id];
             }

             void CopySvcPermissionsTo(KThread::StackParameters &sp) {
-                this->capabilities.CopySvcPermissionsTo(sp);
+                m_capabilities.CopySvcPermissionsTo(sp);
             }

             void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) {
-                this->capabilities.CopyPinnedSvcPermissionsTo(sp);
+                m_capabilities.CopyPinnedSvcPermissionsTo(sp);
             }

             void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) {
-                this->capabilities.CopyUnpinnedSvcPermissionsTo(sp);
+                m_capabilities.CopyUnpinnedSvcPermissionsTo(sp);
             }

             void CopyEnterExceptionSvcPermissionsTo(KThread::StackParameters &sp) {
-                this->capabilities.CopyEnterExceptionSvcPermissionsTo(sp);
+                m_capabilities.CopyEnterExceptionSvcPermissionsTo(sp);
             }

             void CopyLeaveExceptionSvcPermissionsTo(KThread::StackParameters &sp) {
-                this->capabilities.CopyLeaveExceptionSvcPermissionsTo(sp);
+                m_capabilities.CopyLeaveExceptionSvcPermissionsTo(sp);
             }

-            constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; }
+            constexpr KResourceLimit *GetResourceLimit() const { return m_resource_limit; }

             bool ReserveResource(ams::svc::LimitableResource which, s64 value);
             bool ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout);
             void ReleaseResource(ams::svc::LimitableResource which, s64 value);
             void ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint);

-            constexpr KLightLock &GetStateLock() { return this->state_lock; }
+            constexpr KLightLock &GetStateLock() { return m_state_lock; }
-            constexpr KLightLock &GetListLock() { return this->list_lock; }
+            constexpr KLightLock &GetListLock() { return m_list_lock; }

-            constexpr KProcessPageTable &GetPageTable() { return this->page_table; }
+            constexpr KProcessPageTable &GetPageTable() { return m_page_table; }
-            constexpr const KProcessPageTable &GetPageTable() const { return this->page_table; }
+            constexpr const KProcessPageTable &GetPageTable() const { return m_page_table; }

-            constexpr KHandleTable &GetHandleTable() { return this->handle_table; }
+            constexpr KHandleTable &GetHandleTable() { return m_handle_table; }
-            constexpr const KHandleTable &GetHandleTable() const { return this->handle_table; }
+            constexpr const KHandleTable &GetHandleTable() const { return m_handle_table; }

-            KWaitObject *GetWaitObjectPointer() { return std::addressof(this->wait_object); }
+            KWaitObject *GetWaitObjectPointer() { return std::addressof(m_wait_object); }

             size_t GetUsedUserPhysicalMemorySize() const;
             size_t GetTotalUserPhysicalMemorySize() const;
@@ -276,45 +276,45 @@ namespace ams::kern {
             Result DeleteThreadLocalRegion(KProcessAddress addr);
             void *GetThreadLocalRegionPointer(KProcessAddress addr);

-            constexpr KProcessAddress GetProcessLocalRegionAddress() const { return this->plr_address; }
+            constexpr KProcessAddress GetProcessLocalRegionAddress() const { return m_plr_address; }

-            void AddCpuTime(s64 diff) { this->cpu_time += diff; }
+            void AddCpuTime(s64 diff) { m_cpu_time += diff; }
-            s64 GetCpuTime() { return this->cpu_time; }
+            s64 GetCpuTime() { return m_cpu_time; }

-            constexpr s64 GetScheduledCount() const { return this->schedule_count; }
+            constexpr s64 GetScheduledCount() const { return m_schedule_count; }
-            void IncrementScheduledCount() { ++this->schedule_count; }
+            void IncrementScheduledCount() { ++m_schedule_count; }

             void IncrementThreadCount();
             void DecrementThreadCount();

-            size_t GetTotalSystemResourceSize() const { return this->system_resource_num_pages * PageSize; }
+            size_t GetTotalSystemResourceSize() const { return m_system_resource_num_pages * PageSize; }
             size_t GetUsedSystemResourceSize() const {
-                if (this->system_resource_num_pages == 0) {
+                if (m_system_resource_num_pages == 0) {
                     return 0;
                 }
-                return this->dynamic_page_manager.GetUsed() * PageSize;
+                return m_dynamic_page_manager.GetUsed() * PageSize;
             }

             void SetRunningThread(s32 core, KThread *thread, u64 idle_count) {
-                this->running_threads[core] = thread;
+                m_running_threads[core] = thread;
-                this->running_thread_idle_counts[core] = idle_count;
+                m_running_thread_idle_counts[core] = idle_count;
             }

             void ClearRunningThread(KThread *thread) {
-                for (size_t i = 0; i < util::size(this->running_threads); ++i) {
+                for (size_t i = 0; i < util::size(m_running_threads); ++i) {
-                    if (this->running_threads[i] == thread) {
+                    if (m_running_threads[i] == thread) {
-                        this->running_threads[i] = nullptr;
+                        m_running_threads[i] = nullptr;
                     }
                 }
             }

-            const KDynamicPageManager &GetDynamicPageManager() const { return this->dynamic_page_manager; }
+            const KDynamicPageManager &GetDynamicPageManager() const { return m_dynamic_page_manager; }
-            const KMemoryBlockSlabManager &GetMemoryBlockSlabManager() const { return this->memory_block_slab_manager; }
+            const KMemoryBlockSlabManager &GetMemoryBlockSlabManager() const { return m_memory_block_slab_manager; }
-            const KBlockInfoManager &GetBlockInfoManager() const { return this->block_info_manager; }
+            const KBlockInfoManager &GetBlockInfoManager() const { return m_block_info_manager; }
-            const KPageTableManager &GetPageTableManager() const { return this->page_table_manager; }
+            const KPageTableManager &GetPageTableManager() const { return m_page_table_manager; }

-            constexpr KThread *GetRunningThread(s32 core) const { return this->running_threads[core]; }
+            constexpr KThread *GetRunningThread(s32 core) const { return m_running_threads[core]; }
-            constexpr u64 GetRunningThreadIdleCount(s32 core) const { return this->running_thread_idle_counts[core]; }
+            constexpr u64 GetRunningThreadIdleCount(s32 core) const { return m_running_thread_idle_counts[core]; }

             void RegisterThread(KThread *thread);
             void UnregisterThread(KThread *thread);
@@ -324,13 +324,13 @@ namespace ams::kern {
             Result Reset();

             void SetDebugBreak() {
-                if (this->state == State_RunningAttached) {
+                if (m_state == State_RunningAttached) {
                     this->ChangeState(State_DebugBreak);
                 }
             }

             void SetAttached() {
-                if (this->state == State_DebugBreak) {
+                if (m_state == State_DebugBreak) {
                     this->ChangeState(State_RunningAttached);
                 }
             }
@@ -341,27 +341,27 @@ namespace ams::kern {
             void UnpinCurrentThread();

             Result SignalToAddress(KProcessAddress address) {
-                return this->cond_var.SignalToAddress(address);
+                return m_cond_var.SignalToAddress(address);
             }

             Result WaitForAddress(ams::svc::Handle handle, KProcessAddress address, u32 tag) {
-                return this->cond_var.WaitForAddress(handle, address, tag);
+                return m_cond_var.WaitForAddress(handle, address, tag);
             }

             void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
-                return this->cond_var.Signal(cv_key, count);
+                return m_cond_var.Signal(cv_key, count);
             }

             Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
-                return this->cond_var.Wait(address, cv_key, tag, ns);
+                return m_cond_var.Wait(address, cv_key, tag, ns);
             }

             Result SignalAddressArbiter(uintptr_t address, ams::svc::SignalType signal_type, s32 value, s32 count) {
-                return this->address_arbiter.SignalToAddress(address, signal_type, value, count);
+                return m_address_arbiter.SignalToAddress(address, signal_type, value, count);
             }

             Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s32 value, s64 timeout) {
-                return this->address_arbiter.WaitForAddress(address, arb_type, value, timeout);
+                return m_address_arbiter.WaitForAddress(address, arb_type, value, timeout);
             }

             Result GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count);
@@ -381,7 +381,7 @@ namespace ams::kern {
             }
         public:
             /* Overridden parent functions. */
-            virtual bool IsInitialized() const override { return this->is_initialized; }
+            virtual bool IsInitialized() const override { return m_is_initialized; }

             static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
@@ -392,15 +392,15 @@ namespace ams::kern {
             virtual bool IsSignaled() const override {
                 MESOSPHERE_ASSERT_THIS();
                 MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
-                return this->is_signaled;
+                return m_is_signaled;
             }

             virtual void DoWorkerTask() override;
         private:
             void ChangeState(State new_state) {
-                if (this->state != new_state) {
+                if (m_state != new_state) {
-                    this->state = new_state;
+                    m_state = new_state;
-                    this->is_signaled = true;
+                    m_is_signaled = true;
                     this->NotifyAvailable();
                 }
             }
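ChangeState above is the usual synchronization-object signal pattern: mutate the state, latch m_is_signaled, then wake waiters, all while the scheduler lock is held (IsSignaled asserts exactly that). The same shape in portable C++, with a mutex and condition variable standing in for the scheduler lock and NotifyAvailable (an analogy, not the kernel's implementation):

    #include <condition_variable>
    #include <mutex>

    class SignalOnStateChange {
        public:
            enum class State { Created, Running, Terminated };

            void ChangeState(State new_state) {
                std::lock_guard lk(m_mutex);
                if (m_state != new_state) {
                    m_state       = new_state;
                    m_is_signaled = true;
                    m_cv.notify_all();  /* plays the role of NotifyAvailable() */
                }
            }

            void WaitSignaled() {
                std::unique_lock lk(m_mutex);
                m_cv.wait(lk, [this] { return m_is_signaled; });
            }
        private:
            std::mutex              m_mutex;
            std::condition_variable m_cv;
            State                   m_state       = State::Created;
            bool                    m_is_signaled = false;
    };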
@@ -24,19 +24,19 @@ namespace ams::kern {

     class KReadableEvent : public KSynchronizationObject {
         MESOSPHERE_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
         private:
-            bool is_signaled;
+            bool m_is_signaled;
-            KEvent *parent_event;
+            KEvent *m_parent;
         public:
-            constexpr explicit KReadableEvent() : KSynchronizationObject(), is_signaled(), parent_event() { MESOSPHERE_ASSERT_THIS(); }
+            constexpr explicit KReadableEvent() : KSynchronizationObject(), m_is_signaled(), m_parent() { MESOSPHERE_ASSERT_THIS(); }
             virtual ~KReadableEvent() { MESOSPHERE_ASSERT_THIS(); }

             constexpr void Initialize(KEvent *parent) {
                 MESOSPHERE_ASSERT_THIS();
-                this->is_signaled = false;
+                m_is_signaled = false;
-                this->parent_event = parent;
+                m_parent = parent;
             }

-            constexpr KEvent *GetParent() const { return this->parent_event; }
+            constexpr KEvent *GetParent() const { return m_parent; }

             virtual bool IsSignaled() const override;
             virtual void Destroy() override;
@@ -25,15 +25,15 @@ namespace ams::kern {

     class KResourceLimit final : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit, KAutoObjectWithList> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
         private:
-            s64 limit_values[ams::svc::LimitableResource_Count];
+            s64 m_limit_values[ams::svc::LimitableResource_Count];
-            s64 current_values[ams::svc::LimitableResource_Count];
+            s64 m_current_values[ams::svc::LimitableResource_Count];
-            s64 current_hints[ams::svc::LimitableResource_Count];
+            s64 m_current_hints[ams::svc::LimitableResource_Count];
-            s64 peak_values[ams::svc::LimitableResource_Count];
+            s64 m_peak_values[ams::svc::LimitableResource_Count];
-            mutable KLightLock lock;
+            mutable KLightLock m_lock;
-            s32 waiter_count;
+            s32 m_waiter_count;
-            KLightConditionVariable cond_var;
+            KLightConditionVariable m_cond_var;
         public:
-            constexpr ALWAYS_INLINE KResourceLimit() : limit_values(), current_values(), current_hints(), peak_values(), lock(), waiter_count(), cond_var() { /* ... */ }
+            constexpr ALWAYS_INLINE KResourceLimit() : m_limit_values(), m_current_values(), m_current_hints(), m_peak_values(), m_lock(), m_waiter_count(), m_cond_var() { /* ... */ }
             virtual ~KResourceLimit() { /* ... */ }

             static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
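The four parallel arrays above hold, per limitable resource, the configured limit, the amount currently reserved, a hint counterpart consumed by the hinted ReleaseResource overload seen earlier in KProcess, and a high-water mark. A sketch of the reserve/release arithmetic for a single resource, without the locking or the blocking wait on m_cond_var that the real ReserveResource supports (semantics partly assumed from the member names):

    #include <algorithm>
    #include <cstdint>

    struct ResourceBookkeeping {
        std::int64_t limit   = 0;
        std::int64_t current = 0;
        std::int64_t peak    = 0;

        bool Reserve(std::int64_t value) {
            if (current + value > limit) {
                return false;                    /* would exceed the limit */
            }
            current += value;
            peak = std::max(peak, current);      /* track the high-water mark */
            return true;
        }

        void Release(std::int64_t value) {
            current -= value;
        }
    };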
@@ -50,35 +50,35 @@ namespace ams::kern {
             friend class KScopedSchedulerLockAndSleep;
             friend class KScopedDisableDispatch;
         private:
-            SchedulingState state;
+            SchedulingState m_state;
-            bool is_active;
+            bool m_is_active;
-            s32 core_id;
+            s32 m_core_id;
-            KThread *prev_thread;
+            KThread *m_prev_thread;
-            s64 last_context_switch_time;
+            s64 m_last_context_switch_time;
-            KThread *idle_thread;
+            KThread *m_idle_thread;
-            std::atomic<KThread *> current_thread;
+            std::atomic<KThread *> m_current_thread;
         public:
             constexpr KScheduler()
-                : state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr), current_thread(nullptr)
+                : m_state(), m_is_active(false), m_core_id(0), m_prev_thread(nullptr), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
             {
-                this->state.needs_scheduling = true;
+                m_state.needs_scheduling = true;
-                this->state.interrupt_task_thread_runnable = false;
+                m_state.interrupt_task_thread_runnable = false;
-                this->state.should_count_idle = false;
+                m_state.should_count_idle = false;
-                this->state.idle_count = 0;
+                m_state.idle_count = 0;
-                this->state.idle_thread_stack = nullptr;
+                m_state.idle_thread_stack = nullptr;
-                this->state.highest_priority_thread = nullptr;
+                m_state.highest_priority_thread = nullptr;
             }

             NOINLINE void Initialize(KThread *idle_thread);
             NOINLINE void Activate();

             ALWAYS_INLINE void SetInterruptTaskRunnable() {
-                this->state.interrupt_task_thread_runnable = true;
+                m_state.interrupt_task_thread_runnable = true;
-                this->state.needs_scheduling = true;
+                m_state.needs_scheduling = true;
             }

             ALWAYS_INLINE void RequestScheduleOnInterrupt() {
-                this->state.needs_scheduling = true;
+                m_state.needs_scheduling = true;

                 if (CanSchedule()) {
                     this->ScheduleOnInterrupt();
@@ -86,23 +86,23 @@ namespace ams::kern {
             }

             ALWAYS_INLINE u64 GetIdleCount() const {
-                return this->state.idle_count;
+                return m_state.idle_count;
             }

             ALWAYS_INLINE KThread *GetIdleThread() const {
-                return this->idle_thread;
+                return m_idle_thread;
             }

             ALWAYS_INLINE KThread *GetPreviousThread() const {
-                return this->prev_thread;
+                return m_prev_thread;
             }

             ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
-                return this->current_thread;
+                return m_current_thread;
             }

             ALWAYS_INLINE s64 GetLastContextSwitchTime() const {
-                return this->last_context_switch_time;
+                return m_last_context_switch_time;
             }
         private:
             /* Static private API. */
@@ -161,7 +161,7 @@ namespace ams::kern {

             ALWAYS_INLINE void Schedule() {
                 MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
-                MESOSPHERE_ASSERT(this->core_id == GetCurrentCoreId());
+                MESOSPHERE_ASSERT(m_core_id == GetCurrentCoreId());

                 this->ScheduleImpl();
             }
@@ -181,7 +181,7 @@ namespace ams::kern {
                 KScopedInterruptDisable intr_disable;
                 ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };

-                if (this->state.needs_scheduling) {
+                if (m_state.needs_scheduling) {
                     Schedule();
                 }
             }
@@ -33,16 +33,16 @@ namespace ams::kern {
    template<typename SchedulerType> requires KSchedulerLockable<SchedulerType>
    class KAbstractSchedulerLock {
        private:
-           KAlignedSpinLock spin_lock;
+           KAlignedSpinLock m_spin_lock;
-           s32 lock_count;
+           s32 m_lock_count;
-           KThread *owner_thread;
+           KThread *m_owner_thread;
        public:
-           constexpr ALWAYS_INLINE KAbstractSchedulerLock() : spin_lock(), lock_count(0), owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }
+           constexpr ALWAYS_INLINE KAbstractSchedulerLock() : m_spin_lock(), m_lock_count(0), m_owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }

            ALWAYS_INLINE bool IsLockedByCurrentThread() const {
                MESOSPHERE_ASSERT_THIS();

-               return this->owner_thread == GetCurrentThreadPointer();
+               return m_owner_thread == GetCurrentThreadPointer();
            }

            void Lock() {
@@ -50,36 +50,36 @@ namespace ams::kern {

                if (this->IsLockedByCurrentThread()) {
                    /* If we already own the lock, we can just increment the count. */
-                   MESOSPHERE_ASSERT(this->lock_count > 0);
+                   MESOSPHERE_ASSERT(m_lock_count > 0);
-                   this->lock_count++;
+                   m_lock_count++;
                } else {
                    /* Otherwise, we want to disable scheduling and acquire the spinlock. */
                    SchedulerType::DisableScheduling();
-                   this->spin_lock.Lock();
+                   m_spin_lock.Lock();

                    /* For debug, ensure that our state is valid. */
-                   MESOSPHERE_ASSERT(this->lock_count == 0);
+                   MESOSPHERE_ASSERT(m_lock_count == 0);
-                   MESOSPHERE_ASSERT(this->owner_thread == nullptr);
+                   MESOSPHERE_ASSERT(m_owner_thread == nullptr);

                    /* Increment count, take ownership. */
-                   this->lock_count = 1;
+                   m_lock_count = 1;
-                   this->owner_thread = GetCurrentThreadPointer();
+                   m_owner_thread = GetCurrentThreadPointer();
                }
            }

            void Unlock() {
                MESOSPHERE_ASSERT_THIS();
                MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-               MESOSPHERE_ASSERT(this->lock_count > 0);
+               MESOSPHERE_ASSERT(m_lock_count > 0);

                /* Release an instance of the lock. */
-               if ((--this->lock_count) == 0) {
+               if ((--m_lock_count) == 0) {
                    /* We're no longer going to hold the lock. Take note of what cores need scheduling. */
                    const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads();

                    /* Note that we no longer hold the lock, and unlock the spinlock. */
-                   this->owner_thread = nullptr;
+                   m_owner_thread = nullptr;
-                   this->spin_lock.Unlock();
+                   m_spin_lock.Unlock();

                    /* Enable scheduling, and perform a rescheduling operation. */
                    SchedulerType::EnableScheduling(cores_needing_scheduling);
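For context, the lock above is recursive: a second Lock() by the owning thread only bumps m_lock_count, and only the final Unlock() (count back to zero) recomputes the highest-priority threads and re-enables scheduling. A minimal caller sketch, with hypothetical helper names and assuming KScopedSchedulerLock is the RAII wrapper over Lock()/Unlock():

void UpdatePriorityAndRequeue(KThread *thread) {
    KScopedSchedulerLock sl;     /* Lock(): m_lock_count 0 -> 1, spinlock taken.   */
    AdjustPriorityOf(thread);    /* Hypothetical helper; may itself construct a    */
                                 /* KScopedSchedulerLock: count 1 -> 2, and the    */
                                 /* spinlock is not re-acquired.                   */
}                                /* Both Unlock() calls run, but only the final    */
                                 /* one performs the reschedule.                   */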
@@ -29,11 +29,11 @@ namespace ams::kern {
            NON_COPYABLE(KScopedLock);
            NON_MOVEABLE(KScopedLock);
        private:
-           T *lock_ptr;
+           T &m_lock;
        public:
-           explicit ALWAYS_INLINE KScopedLock(T *l) : lock_ptr(l) { this->lock_ptr->Lock(); }
+           explicit ALWAYS_INLINE KScopedLock(T &l) : m_lock(l) { m_lock.Lock(); }
-           explicit ALWAYS_INLINE KScopedLock(T &l) : KScopedLock(std::addressof(l)) { /* ... */ }
+           explicit ALWAYS_INLINE KScopedLock(T *l) : KScopedLock(*l) { /* ... */ }
-           ALWAYS_INLINE ~KScopedLock() { this->lock_ptr->Unlock(); }
+           ALWAYS_INLINE ~KScopedLock() { m_lock.Unlock(); }
    };

 }
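Note that this hunk is more than a rename: the stored member changes from a pointer to a reference, and the delegation direction flips, so the reference constructor is now primary and the pointer overload forwards to it. Both call shapes keep working; a small usage sketch, with KLightLock chosen purely for illustration:

KLightLock g_example_lock;

void ByReference() {
    KScopedLock lk(g_example_lock);    /* T & constructor: locks immediately;  */
    /* ... critical section ... */     /* the destructor unlocks at scope exit. */
}

void ByPointer(KLightLock *lock) {
    KScopedLock lk(lock);              /* T * constructor: delegates to *lock. */
}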
@@ -22,24 +22,24 @@ namespace ams::kern {

    class KScopedResourceReservation {
        private:
-           KResourceLimit *limit;
+           KResourceLimit *m_limit;
-           s64 value;
+           s64 m_value;
-           ams::svc::LimitableResource resource;
+           ams::svc::LimitableResource m_resource;
-           bool succeeded;
+           bool m_succeeded;
        public:
-           ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : limit(l), value(v), resource(r) {
+           ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : m_limit(l), m_value(v), m_resource(r) {
-               if (this->limit && this->value) {
+               if (m_limit && m_value) {
-                   this->succeeded = this->limit->Reserve(this->resource, this->value, timeout);
+                   m_succeeded = m_limit->Reserve(m_resource, m_value, timeout);
                } else {
-                   this->succeeded = true;
+                   m_succeeded = true;
                }
            }

-           ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : limit(l), value(v), resource(r) {
+           ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : m_limit(l), m_value(v), m_resource(r) {
-               if (this->limit && this->value) {
+               if (m_limit && m_value) {
-                   this->succeeded = this->limit->Reserve(this->resource, this->value);
+                   m_succeeded = m_limit->Reserve(m_resource, m_value);
                } else {
-                   this->succeeded = true;
+                   m_succeeded = true;
                }
            }
@@ -47,17 +47,17 @@ namespace ams::kern {
            ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v = 1) : KScopedResourceReservation(p->GetResourceLimit(), r, v) { /* ... */ }

            ALWAYS_INLINE ~KScopedResourceReservation() {
-               if (this->limit && this->value && this->succeeded) {
+               if (m_limit && m_value && m_succeeded) {
-                   this->limit->Release(this->resource, this->value);
+                   m_limit->Release(m_resource, m_value);
                }
            }

            ALWAYS_INLINE void Commit() {
-               this->limit = nullptr;
+               m_limit = nullptr;
            }

            ALWAYS_INLINE bool Succeeded() const {
-               return this->succeeded;
+               return m_succeeded;
            }
    };
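The destructor/Commit() pair forms a release-on-failure guard: the reserved units flow back to the limit unless the caller commits. A hedged sketch of a typical creation path (everything except the class and svc names is illustrative):

Result CreateSomethingForProcess(KProcess *process) {
    /* Reserve one event against the process's resource limit. */
    KScopedResourceReservation reservation(process, ams::svc::LimitableResource_EventCountMax);
    R_UNLESS(reservation.Succeeded(), svc::ResultLimitReached());

    /* ... allocate and register the object; any early return releases the unit ... */

    /* Success: commit, so the destructor no longer releases the reservation. */
    reservation.Commit();
    return ResultSuccess();
}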
@@ -23,24 +23,24 @@ namespace ams::kern {

    class KScopedSchedulerLockAndSleep {
        private:
-           s64 timeout_tick;
+           s64 m_timeout_tick;
-           KThread *thread;
+           KThread *m_thread;
-           KHardwareTimer *timer;
+           KHardwareTimer *m_timer;
        public:
-           explicit ALWAYS_INLINE KScopedSchedulerLockAndSleep(KHardwareTimer **out_timer, KThread *t, s64 timeout) : timeout_tick(timeout), thread(t) {
+           explicit ALWAYS_INLINE KScopedSchedulerLockAndSleep(KHardwareTimer **out_timer, KThread *t, s64 timeout) : m_timeout_tick(timeout), m_thread(t) {
                /* Lock the scheduler. */
                KScheduler::s_scheduler_lock.Lock();

                /* Set our timer only if the absolute time is positive. */
-               this->timer = (this->timeout_tick > 0) ? std::addressof(Kernel::GetHardwareTimer()) : nullptr;
+               m_timer = (m_timeout_tick > 0) ? std::addressof(Kernel::GetHardwareTimer()) : nullptr;

-               *out_timer = this->timer;
+               *out_timer = m_timer;
            }

            ~KScopedSchedulerLockAndSleep() {
                /* Register the sleep. */
-               if (this->timeout_tick > 0) {
+               if (m_timeout_tick > 0) {
-                   this->timer->RegisterAbsoluteTask(this->thread, this->timeout_tick);
+                   m_timer->RegisterAbsoluteTask(m_thread, m_timeout_tick);
                }

                /* Unlock the scheduler. */
@@ -48,7 +48,7 @@ namespace ams::kern {
            }

            ALWAYS_INLINE void CancelSleep() {
-               this->timeout_tick = 0;
+               m_timeout_tick = 0;
            }
    };
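CancelSleep() works by zeroing m_timeout_tick, which the destructor checks before registering the timer task, so the sleep and the scheduler unlock stay paired. A sketch of the intended wait pattern under that assumption (the predicate is hypothetical):

void WaitExample(KThread *cur_thread, s64 timeout_tick) {
    KHardwareTimer *timer;
    {
        KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout_tick);

        if (ConditionAlreadySatisfied()) {  /* hypothetical predicate */
            slp.CancelSleep();              /* destructor now skips RegisterAbsoluteTask */
            return;
        }

        /* ... mark the thread as a waiter; on scope exit the destructor
           registers the absolute timeout (if positive) and unlocks the
           scheduler, making the sleep-and-unlock effectively atomic. */
    }
}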
@@ -30,11 +30,11 @@ namespace ams::kern {
            using SessionList = util::IntrusiveListBaseTraits<KServerSession>::ListType;
            using LightSessionList = util::IntrusiveListBaseTraits<KLightServerSession>::ListType;
        private:
-           SessionList session_list;
+           SessionList m_session_list;
-           LightSessionList light_session_list;
+           LightSessionList m_light_session_list;
-           KPort *parent;
+           KPort *m_parent;
        public:
-           constexpr KServerPort() : session_list(), light_session_list(), parent() { /* ... */ }
+           constexpr KServerPort() : m_session_list(), m_light_session_list(), m_parent() { /* ... */ }
            virtual ~KServerPort() { /* ... */ }

            void Initialize(KPort *parent);
@@ -44,7 +44,7 @@ namespace ams::kern {
            KServerSession *AcceptSession();
            KLightServerSession *AcceptLightSession();

-           constexpr const KPort *GetParent() const { return this->parent; }
+           constexpr const KPort *GetParent() const { return m_parent; }

            bool IsLight() const;
@@ -28,19 +28,19 @@ namespace ams::kern {
        private:
            using RequestList = util::IntrusiveListBaseTraits<KSessionRequest>::ListType;
        private:
-           KSession *parent;
+           KSession *m_parent;
-           RequestList request_list;
+           RequestList m_request_list;
-           KSessionRequest *current_request;
+           KSessionRequest *m_current_request;
-           KLightLock lock;
+           KLightLock m_lock;
        public:
-           constexpr KServerSession() : parent(), request_list(), current_request(), lock() { /* ... */ }
+           constexpr KServerSession() : m_parent(), m_request_list(), m_current_request(), m_lock() { /* ... */ }
            virtual ~KServerSession() { /* ... */ }

            virtual void Destroy() override;

-           void Initialize(KSession *p) { this->parent = p; }
+           void Initialize(KSession *p) { m_parent = p; }

-           constexpr const KSession *GetParent() const { return this->parent; }
+           constexpr const KSession *GetParent() const { return m_parent; }

            virtual bool IsSignaled() const override;
@@ -35,16 +35,16 @@ namespace ams::kern {
            ServerClosed = 3,
        };
        private:
-           KServerSession server;
+           KServerSession m_server;
-           KClientSession client;
+           KClientSession m_client;
-           State state;
+           State m_state;
-           KClientPort *port;
+           KClientPort *m_port;
-           uintptr_t name;
+           uintptr_t m_name;
-           KProcess *process;
+           KProcess *m_process;
-           bool initialized;
+           bool m_initialized;
        public:
            constexpr KSession()
-               : server(), client(), state(State::Invalid), port(), name(), process(), initialized()
+               : m_server(), m_client(), m_state(State::Invalid), m_port(), m_name(), m_process(), m_initialized()
            {
                /* ... */
            }
@@ -54,25 +54,25 @@ namespace ams::kern {
            void Initialize(KClientPort *client_port, uintptr_t name);
            virtual void Finalize() override;

-           virtual bool IsInitialized() const override { return this->initialized; }
+           virtual bool IsInitialized() const override { return m_initialized; }
-           virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->process); }
+           virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_process); }

            static void PostDestroy(uintptr_t arg);

            void OnServerClosed();
            void OnClientClosed();

-           bool IsServerClosed() const { return this->state != State::Normal; }
+           bool IsServerClosed() const { return m_state != State::Normal; }
-           bool IsClientClosed() const { return this->state != State::Normal; }
+           bool IsClientClosed() const { return m_state != State::Normal; }

-           Result OnRequest(KSessionRequest *request) { return this->server.OnRequest(request); }
+           Result OnRequest(KSessionRequest *request) { return m_server.OnRequest(request); }

-           KClientSession &GetClientSession() { return this->client; }
+           KClientSession &GetClientSession() { return m_client; }
-           KServerSession &GetServerSession() { return this->server; }
+           KServerSession &GetServerSession() { return m_server; }
-           const KClientSession &GetClientSession() const { return this->client; }
+           const KClientSession &GetClientSession() const { return m_client; }
-           const KServerSession &GetServerSession() const { return this->server; }
+           const KServerSession &GetServerSession() const { return m_server; }

-           const KClientPort *GetParent() const { return this->port; }
+           const KClientPort *GetParent() const { return m_port; }
    };

 }
@@ -33,38 +33,38 @@ namespace ams::kern {

            class Mapping {
                private:
-                   KProcessAddress client_address;
+                   KProcessAddress m_client_address;
-                   KProcessAddress server_address;
+                   KProcessAddress m_server_address;
-                   size_t size;
+                   size_t m_size;
-                   KMemoryState state;
+                   KMemoryState m_state;
                public:
                    constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) {
-                       this->client_address = c;
+                       m_client_address = c;
-                       this->server_address = s;
+                       m_server_address = s;
-                       this->size = sz;
+                       m_size = sz;
-                       this->state = st;
+                       m_state = st;
                    }

-                   constexpr ALWAYS_INLINE KProcessAddress GetClientAddress() const { return this->client_address; }
+                   constexpr ALWAYS_INLINE KProcessAddress GetClientAddress() const { return m_client_address; }
-                   constexpr ALWAYS_INLINE KProcessAddress GetServerAddress() const { return this->server_address; }
+                   constexpr ALWAYS_INLINE KProcessAddress GetServerAddress() const { return m_server_address; }
-                   constexpr ALWAYS_INLINE size_t GetSize() const { return this->size; }
+                   constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
-                   constexpr ALWAYS_INLINE KMemoryState GetMemoryState() const { return this->state; }
+                   constexpr ALWAYS_INLINE KMemoryState GetMemoryState() const { return m_state; }
            };
            private:
-               Mapping static_mappings[NumStaticMappings];
+               Mapping m_static_mappings[NumStaticMappings];
-               Mapping *mappings;
+               Mapping *m_mappings;
-               u8 num_send;
+               u8 m_num_send;
-               u8 num_recv;
+               u8 m_num_recv;
-               u8 num_exch;
+               u8 m_num_exch;
            public:
-               constexpr explicit SessionMappings() : static_mappings(), mappings(), num_send(), num_recv(), num_exch() { /* ... */ }
+               constexpr explicit SessionMappings() : m_static_mappings(), m_mappings(), m_num_send(), m_num_recv(), m_num_exch() { /* ... */ }

                void Initialize() { /* ... */ }
                void Finalize();

-               constexpr ALWAYS_INLINE size_t GetSendCount() const { return this->num_send; }
+               constexpr ALWAYS_INLINE size_t GetSendCount() const { return m_num_send; }
-               constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return this->num_recv; }
+               constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return m_num_recv; }
-               constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return this->num_exch; }
+               constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return m_num_exch; }

                Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state);
                Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state);
@@ -88,49 +88,49 @@ namespace ams::kern {
                Result PushMap(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state, size_t index);

                constexpr ALWAYS_INLINE const Mapping &GetSendMapping(size_t i) const {
-                   MESOSPHERE_ASSERT(i < this->num_send);
+                   MESOSPHERE_ASSERT(i < m_num_send);

                    const size_t index = i;
                    if (index < NumStaticMappings) {
-                       return this->static_mappings[index];
+                       return m_static_mappings[index];
                    } else {
-                       return this->mappings[index - NumStaticMappings];
+                       return m_mappings[index - NumStaticMappings];
                    }
                }

                constexpr ALWAYS_INLINE const Mapping &GetReceiveMapping(size_t i) const {
-                   MESOSPHERE_ASSERT(i < this->num_recv);
+                   MESOSPHERE_ASSERT(i < m_num_recv);

-                   const size_t index = this->num_send + i;
+                   const size_t index = m_num_send + i;
                    if (index < NumStaticMappings) {
-                       return this->static_mappings[index];
+                       return m_static_mappings[index];
                    } else {
-                       return this->mappings[index - NumStaticMappings];
+                       return m_mappings[index - NumStaticMappings];
                    }
                }

                constexpr ALWAYS_INLINE const Mapping &GetExchangeMapping(size_t i) const {
-                   MESOSPHERE_ASSERT(i < this->num_exch);
+                   MESOSPHERE_ASSERT(i < m_num_exch);

-                   const size_t index = this->num_send + this->num_recv + i;
+                   const size_t index = m_num_send + m_num_recv + i;
                    if (index < NumStaticMappings) {
-                       return this->static_mappings[index];
+                       return m_static_mappings[index];
                    } else {
-                       return this->mappings[index - NumStaticMappings];
+                       return m_mappings[index - NumStaticMappings];
                    }
                }

            };
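All three getters share a single index space: sends occupy [0, m_num_send), receives follow, then exchanges; the first NumStaticMappings slots live in the inline array and the remainder spill into m_mappings. A worked example with assumed counts:

/* Assume NumStaticMappings == 8 (illustrative), m_num_send == 3, m_num_recv == 4. */
/* GetReceiveMapping(1):  index = 3 + 1 = 4, 4 < 8  -> m_static_mappings[4].       */
/* GetExchangeMapping(2): index = 3 + 4 + 2 = 9, 9 >= 8 -> m_mappings[9 - 8],      */
/* i.e. the second dynamically allocated slot.                                     */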
        private:
-           SessionMappings mappings;
+           SessionMappings m_mappings;
-           KThread *thread;
+           KThread *m_thread;
-           KProcess *server;
+           KProcess *m_server;
-           KWritableEvent *event;
+           KWritableEvent *m_event;
-           uintptr_t address;
+           uintptr_t m_address;
-           size_t size;
+           size_t m_size;
        public:
-           constexpr KSessionRequest() : mappings(), thread(), server(), event(), address(), size() { /* ... */ }
+           constexpr KSessionRequest() : m_mappings(), m_thread(), m_server(), m_event(), m_address(), m_size() { /* ... */ }
            virtual ~KSessionRequest() { /* ... */ }

            static KSessionRequest *Create() {
@@ -147,79 +147,79 @@ namespace ams::kern {
            }

            void Initialize(KWritableEvent *event, uintptr_t address, size_t size) {
-               this->mappings.Initialize();
+               m_mappings.Initialize();

-               this->thread = std::addressof(GetCurrentThread());
+               m_thread = std::addressof(GetCurrentThread());
-               this->event = event;
+               m_event = event;
-               this->address = address;
+               m_address = address;
-               this->size = size;
+               m_size = size;

-               this->thread->Open();
+               m_thread->Open();
-               if (this->event != nullptr) {
+               if (m_event != nullptr) {
-                   this->event->Open();
+                   m_event->Open();
                }
            }

            virtual void Finalize() override {
-               this->mappings.Finalize();
+               m_mappings.Finalize();

-               if (this->thread) {
+               if (m_thread) {
-                   this->thread->Close();
+                   m_thread->Close();
                }
-               if (this->event) {
+               if (m_event) {
-                   this->event->Close();
+                   m_event->Close();
                }
-               if (this->server) {
+               if (m_server) {
-                   this->server->Close();
+                   m_server->Close();
                }
            }

            static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

-           constexpr ALWAYS_INLINE KThread *GetThread() const { return this->thread; }
+           constexpr ALWAYS_INLINE KThread *GetThread() const { return m_thread; }
-           constexpr ALWAYS_INLINE KWritableEvent *GetEvent() const { return this->event; }
+           constexpr ALWAYS_INLINE KWritableEvent *GetEvent() const { return m_event; }
-           constexpr ALWAYS_INLINE uintptr_t GetAddress() const { return this->address; }
+           constexpr ALWAYS_INLINE uintptr_t GetAddress() const { return m_address; }
-           constexpr ALWAYS_INLINE size_t GetSize() const { return this->size; }
+           constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
-           constexpr ALWAYS_INLINE KProcess *GetServerProcess() const { return this->server; }
+           constexpr ALWAYS_INLINE KProcess *GetServerProcess() const { return m_server; }

            void ALWAYS_INLINE SetServerProcess(KProcess *process) {
-               this->server = process;
+               m_server = process;
-               this->server->Open();
+               m_server->Open();
            }

-           constexpr ALWAYS_INLINE void ClearThread() { this->thread = nullptr; }
+           constexpr ALWAYS_INLINE void ClearThread() { m_thread = nullptr; }
-           constexpr ALWAYS_INLINE void ClearEvent() { this->event = nullptr; }
+           constexpr ALWAYS_INLINE void ClearEvent() { m_event = nullptr; }

-           constexpr ALWAYS_INLINE size_t GetSendCount() const { return this->mappings.GetSendCount(); }
+           constexpr ALWAYS_INLINE size_t GetSendCount() const { return m_mappings.GetSendCount(); }
-           constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return this->mappings.GetReceiveCount(); }
+           constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return m_mappings.GetReceiveCount(); }
-           constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return this->mappings.GetExchangeCount(); }
+           constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return m_mappings.GetExchangeCount(); }

            ALWAYS_INLINE Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-               return this->mappings.PushSend(client, server, size, state);
+               return m_mappings.PushSend(client, server, size, state);
            }

            ALWAYS_INLINE Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-               return this->mappings.PushReceive(client, server, size, state);
+               return m_mappings.PushReceive(client, server, size, state);
            }

            ALWAYS_INLINE Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-               return this->mappings.PushExchange(client, server, size, state);
+               return m_mappings.PushExchange(client, server, size, state);
            }

-           constexpr ALWAYS_INLINE KProcessAddress GetSendClientAddress(size_t i) const { return this->mappings.GetSendClientAddress(i); }
+           constexpr ALWAYS_INLINE KProcessAddress GetSendClientAddress(size_t i) const { return m_mappings.GetSendClientAddress(i); }
-           constexpr ALWAYS_INLINE KProcessAddress GetSendServerAddress(size_t i) const { return this->mappings.GetSendServerAddress(i); }
+           constexpr ALWAYS_INLINE KProcessAddress GetSendServerAddress(size_t i) const { return m_mappings.GetSendServerAddress(i); }
-           constexpr ALWAYS_INLINE size_t GetSendSize(size_t i) const { return this->mappings.GetSendSize(i); }
+           constexpr ALWAYS_INLINE size_t GetSendSize(size_t i) const { return m_mappings.GetSendSize(i); }
-           constexpr ALWAYS_INLINE KMemoryState GetSendMemoryState(size_t i) const { return this->mappings.GetSendMemoryState(i); }
+           constexpr ALWAYS_INLINE KMemoryState GetSendMemoryState(size_t i) const { return m_mappings.GetSendMemoryState(i); }

-           constexpr ALWAYS_INLINE KProcessAddress GetReceiveClientAddress(size_t i) const { return this->mappings.GetReceiveClientAddress(i); }
+           constexpr ALWAYS_INLINE KProcessAddress GetReceiveClientAddress(size_t i) const { return m_mappings.GetReceiveClientAddress(i); }
-           constexpr ALWAYS_INLINE KProcessAddress GetReceiveServerAddress(size_t i) const { return this->mappings.GetReceiveServerAddress(i); }
+           constexpr ALWAYS_INLINE KProcessAddress GetReceiveServerAddress(size_t i) const { return m_mappings.GetReceiveServerAddress(i); }
-           constexpr ALWAYS_INLINE size_t GetReceiveSize(size_t i) const { return this->mappings.GetReceiveSize(i); }
+           constexpr ALWAYS_INLINE size_t GetReceiveSize(size_t i) const { return m_mappings.GetReceiveSize(i); }
-           constexpr ALWAYS_INLINE KMemoryState GetReceiveMemoryState(size_t i) const { return this->mappings.GetReceiveMemoryState(i); }
+           constexpr ALWAYS_INLINE KMemoryState GetReceiveMemoryState(size_t i) const { return m_mappings.GetReceiveMemoryState(i); }

-           constexpr ALWAYS_INLINE KProcessAddress GetExchangeClientAddress(size_t i) const { return this->mappings.GetExchangeClientAddress(i); }
+           constexpr ALWAYS_INLINE KProcessAddress GetExchangeClientAddress(size_t i) const { return m_mappings.GetExchangeClientAddress(i); }
-           constexpr ALWAYS_INLINE KProcessAddress GetExchangeServerAddress(size_t i) const { return this->mappings.GetExchangeServerAddress(i); }
+           constexpr ALWAYS_INLINE KProcessAddress GetExchangeServerAddress(size_t i) const { return m_mappings.GetExchangeServerAddress(i); }
-           constexpr ALWAYS_INLINE size_t GetExchangeSize(size_t i) const { return this->mappings.GetExchangeSize(i); }
+           constexpr ALWAYS_INLINE size_t GetExchangeSize(size_t i) const { return m_mappings.GetExchangeSize(i); }
-           constexpr ALWAYS_INLINE KMemoryState GetExchangeMemoryState(size_t i) const { return this->mappings.GetExchangeMemoryState(i); }
+           constexpr ALWAYS_INLINE KMemoryState GetExchangeMemoryState(size_t i) const { return m_mappings.GetExchangeMemoryState(i); }
    };

 }
@@ -27,16 +27,16 @@ namespace ams::kern {
    class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> {
        MESOSPHERE_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
        private:
-           KPageGroup page_group;
+           KPageGroup m_page_group;
-           KResourceLimit *resource_limit;
+           KResourceLimit *m_resource_limit;
-           u64 owner_process_id;
+           u64 m_owner_process_id;
-           ams::svc::MemoryPermission owner_perm;
+           ams::svc::MemoryPermission m_owner_perm;
-           ams::svc::MemoryPermission remote_perm;
+           ams::svc::MemoryPermission m_remote_perm;
-           bool is_initialized;
+           bool m_is_initialized;
        public:
            explicit KSharedMemory()
-               : page_group(std::addressof(Kernel::GetBlockInfoManager())), resource_limit(nullptr), owner_process_id(std::numeric_limits<u64>::max()),
+               : m_page_group(std::addressof(Kernel::GetBlockInfoManager())), m_resource_limit(nullptr), m_owner_process_id(std::numeric_limits<u64>::max()),
-                 owner_perm(ams::svc::MemoryPermission_None), remote_perm(ams::svc::MemoryPermission_None), is_initialized(false)
+                 m_owner_perm(ams::svc::MemoryPermission_None), m_remote_perm(ams::svc::MemoryPermission_None), m_is_initialized(false)
            {
                /* ... */
            }
@@ -46,14 +46,14 @@ namespace ams::kern {
            Result Initialize(KProcess *owner, size_t size, ams::svc::MemoryPermission own_perm, ams::svc::MemoryPermission rem_perm);
            virtual void Finalize() override;

-           virtual bool IsInitialized() const override { return this->is_initialized; }
+           virtual bool IsInitialized() const override { return m_is_initialized; }
            static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

            Result Map(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process, ams::svc::MemoryPermission map_perm);
            Result Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process);

-           u64 GetOwnerProcessId() const { return this->owner_process_id; }
+           u64 GetOwnerProcessId() const { return m_owner_process_id; }
-           size_t GetSize() const { return this->page_group.GetNumPages() * PageSize; }
+           size_t GetSize() const { return m_page_group.GetNumPages() * PageSize; }
    };

 }
@@ -23,30 +23,30 @@ namespace ams::kern {

    class KSharedMemoryInfo : public KSlabAllocated<KSharedMemoryInfo>, public util::IntrusiveListBaseNode<KSharedMemoryInfo> {
        private:
-           KSharedMemory *shared_memory;
+           KSharedMemory *m_shared_memory;
-           size_t reference_count;
+           size_t m_reference_count;
        public:
-           constexpr KSharedMemoryInfo() : shared_memory(), reference_count() { /* ... */ }
+           constexpr KSharedMemoryInfo() : m_shared_memory(), m_reference_count() { /* ... */ }
            ~KSharedMemoryInfo() { /* ... */ }

            constexpr void Initialize(KSharedMemory *m) {
                MESOSPHERE_ASSERT_THIS();
-               this->shared_memory = m;
+               m_shared_memory = m;
-               this->reference_count = 0;
+               m_reference_count = 0;
            }

            constexpr void Open() {
-               const size_t ref_count = ++this->reference_count;
+               const size_t ref_count = ++m_reference_count;
                MESOSPHERE_ASSERT(ref_count > 0);
            }

            constexpr bool Close() {
-               MESOSPHERE_ASSERT(this->reference_count > 0);
+               MESOSPHERE_ASSERT(m_reference_count > 0);
-               return (--this->reference_count) == 0;
+               return (--m_reference_count) == 0;
            }

-           constexpr KSharedMemory *GetSharedMemory() const { return this->shared_memory; }
+           constexpr KSharedMemory *GetSharedMemory() const { return m_shared_memory; }
-           constexpr size_t GetReferenceCount() const { return this->reference_count; }
+           constexpr size_t GetReferenceCount() const { return m_reference_count; }
    };

 }
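Open()/Close() implement a plain reference count, and Close() returning true signals that the info node is unreferenced and may be returned to its slab. A hypothetical attach/detach sequence under that reading (the Free call assumes the usual KSlabAllocated<> interface):

void ReferenceExample(KSharedMemoryInfo *info, KSharedMemory *mem) {
    info->Initialize(mem);              /* m_reference_count = 0 */
    info->Open();                       /* 1 */
    info->Open();                       /* 2 */

    info->Close();                      /* 2 -> 1, returns false */
    if (info->Close()) {                /* 1 -> 0, returns true  */
        KSharedMemoryInfo::Free(info);  /* assumed slab-allocator API */
    }
}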
@@ -44,28 +44,28 @@ namespace ams::kern {
                Node *next;
            };
        private:
-           Node * head;
+           Node * m_head;
-           size_t obj_size;
+           size_t m_obj_size;
        public:
-           constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { MESOSPHERE_ASSERT_THIS(); }
+           constexpr KSlabHeapImpl() : m_head(nullptr), m_obj_size(0) { MESOSPHERE_ASSERT_THIS(); }

            void Initialize(size_t size) {
-               MESOSPHERE_INIT_ABORT_UNLESS(this->head == nullptr);
+               MESOSPHERE_INIT_ABORT_UNLESS(m_head == nullptr);
-               this->obj_size = size;
+               m_obj_size = size;
            }

            Node *GetHead() const {
-               return this->head;
+               return m_head;
            }

            size_t GetObjectSize() const {
-               return this->obj_size;
+               return m_obj_size;
            }

            void *Allocate() {
                MESOSPHERE_ASSERT_THIS();

-               return AllocateFromSlabAtomic(std::addressof(this->head));
+               return AllocateFromSlabAtomic(std::addressof(m_head));
            }

            void Free(void *obj) {
@@ -73,7 +73,7 @@ namespace ams::kern {

                Node *node = reinterpret_cast<Node *>(obj);

-               return FreeToSlabAtomic(std::addressof(this->head), node);
+               return FreeToSlabAtomic(std::addressof(m_head), node);
            }
    };
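The slab keeps its free list inside the freed objects themselves: Free() reinterprets an object's first pointer-sized bytes as a Node and pushes it onto m_head. An illustrative single-threaded equivalent of the two helpers (the real ones are assumed to use compare-and-swap on the head):

struct Node { Node *next; };

void *PopFront(Node **head) {              /* shape of AllocateFromSlabAtomic */
    Node *n = *head;
    if (n != nullptr) {
        *head = n->next;                   /* unlink the first free object */
    }
    return n;
}

void PushFront(Node **head, void *obj) {   /* shape of FreeToSlabAtomic */
    Node *n = static_cast<Node *>(obj);    /* reuse the object's bytes as the link */
    n->next = *head;
    *head = n;
}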
@@ -85,22 +85,22 @@ namespace ams::kern {
        private:
            using Impl = impl::KSlabHeapImpl;
        private:
-           Impl impl;
+           Impl m_impl;
-           uintptr_t peak;
+           uintptr_t m_peak;
-           uintptr_t start;
+           uintptr_t m_start;
-           uintptr_t end;
+           uintptr_t m_end;
        private:
            ALWAYS_INLINE Impl *GetImpl() {
-               return std::addressof(this->impl);
+               return std::addressof(m_impl);
            }
            ALWAYS_INLINE const Impl *GetImpl() const {
-               return std::addressof(this->impl);
+               return std::addressof(m_impl);
            }
        public:
-           constexpr KSlabHeapBase() : impl(), peak(0), start(0), end(0) { MESOSPHERE_ASSERT_THIS(); }
+           constexpr KSlabHeapBase() : m_impl(), m_peak(0), m_start(0), m_end(0) { MESOSPHERE_ASSERT_THIS(); }

            ALWAYS_INLINE bool Contains(uintptr_t address) const {
-               return this->start <= address && address < this->end;
+               return m_start <= address && address < m_end;
            }

            void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) {
|
||||||
|
|
||||||
/* Set our tracking variables. */
|
/* Set our tracking variables. */
|
||||||
const size_t num_obj = (memory_size / obj_size);
|
const size_t num_obj = (memory_size / obj_size);
|
||||||
this->start = reinterpret_cast<uintptr_t>(memory);
|
m_start = reinterpret_cast<uintptr_t>(memory);
|
||||||
this->end = this->start + num_obj * obj_size;
|
m_end = m_start + num_obj * obj_size;
|
||||||
this->peak = this->start;
|
m_peak = m_start;
|
||||||
|
|
||||||
/* Free the objects. */
|
/* Free the objects. */
|
||||||
u8 *cur = reinterpret_cast<u8 *>(this->end);
|
u8 *cur = reinterpret_cast<u8 *>(m_end);
|
||||||
|
|
||||||
for (size_t i = 0; i < num_obj; i++) {
|
for (size_t i = 0; i < num_obj; i++) {
|
||||||
cur -= obj_size;
|
cur -= obj_size;
|
||||||
|
@ -128,7 +128,7 @@ namespace ams::kern {
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t GetSlabHeapSize() const {
|
size_t GetSlabHeapSize() const {
|
||||||
return (this->end - this->start) / this->GetObjectSize();
|
return (m_end - m_start) / this->GetObjectSize();
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t GetObjectSize() const {
|
size_t GetObjectSize() const {
|
||||||
|
@ -144,10 +144,10 @@ namespace ams::kern {
|
||||||
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
|
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
|
||||||
if (AMS_LIKELY(obj != nullptr)) {
|
if (AMS_LIKELY(obj != nullptr)) {
|
||||||
static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
|
static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
|
||||||
std::atomic_ref<uintptr_t> peak_ref(this->peak);
|
std::atomic_ref<uintptr_t> peak_ref(m_peak);
|
||||||
|
|
||||||
const uintptr_t alloc_peak = reinterpret_cast<uintptr_t>(obj) + this->GetObjectSize();
|
const uintptr_t alloc_peak = reinterpret_cast<uintptr_t>(obj) + this->GetObjectSize();
|
||||||
uintptr_t cur_peak = this->peak;
|
uintptr_t cur_peak = m_peak;
|
||||||
do {
|
do {
|
||||||
if (alloc_peak <= cur_peak) {
|
if (alloc_peak <= cur_peak) {
|
||||||
break;
|
break;
|
||||||
|
@ -169,15 +169,15 @@ namespace ams::kern {
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t GetObjectIndexImpl(const void *obj) const {
|
size_t GetObjectIndexImpl(const void *obj) const {
|
||||||
return (reinterpret_cast<uintptr_t>(obj) - this->start) / this->GetObjectSize();
|
return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t GetPeakIndex() const {
|
size_t GetPeakIndex() const {
|
||||||
return this->GetObjectIndexImpl(reinterpret_cast<const void *>(this->peak));
|
return this->GetObjectIndexImpl(reinterpret_cast<const void *>(m_peak));
|
||||||
}
|
}
|
||||||
|
|
||||||
uintptr_t GetSlabHeapAddress() const {
|
uintptr_t GetSlabHeapAddress() const {
|
||||||
return this->start;
|
return m_start;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t GetNumRemaining() const {
|
size_t GetNumRemaining() const {
|
||||||
|
|
|
@@ -30,10 +30,10 @@ namespace ams::kern {
            KThread *thread;
        };
        private:
-           ThreadListNode *thread_list_head;
+           ThreadListNode *m_thread_list_head;
-           ThreadListNode *thread_list_tail;
+           ThreadListNode *m_thread_list_tail;
        protected:
-           constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), thread_list_head(), thread_list_tail() { MESOSPHERE_ASSERT_THIS(); }
+           constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), m_thread_list_head(), m_thread_list_tail() { MESOSPHERE_ASSERT_THIS(); }
            virtual ~KSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); }

            virtual void OnFinalizeSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); }
@@ -96,44 +96,44 @@ namespace ams::kern {

            struct QueueEntry {
                private:
-                   KThread *prev;
+                   KThread *m_prev;
-                   KThread *next;
+                   KThread *m_next;
                public:
-                   constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ }
+                   constexpr QueueEntry() : m_prev(nullptr), m_next(nullptr) { /* ... */ }

                    constexpr void Initialize() {
-                       this->prev = nullptr;
+                       m_prev = nullptr;
-                       this->next = nullptr;
+                       m_next = nullptr;
                    }

-                   constexpr KThread *GetPrev() const { return this->prev; }
+                   constexpr KThread *GetPrev() const { return m_prev; }
-                   constexpr KThread *GetNext() const { return this->next; }
+                   constexpr KThread *GetNext() const { return m_next; }
-                   constexpr void SetPrev(KThread *t) { this->prev = t; }
+                   constexpr void SetPrev(KThread *t) { m_prev = t; }
-                   constexpr void SetNext(KThread *t) { this->next = t; }
+                   constexpr void SetNext(KThread *t) { m_next = t; }
            };

            using WaiterList = util::IntrusiveListBaseTraits<KThread>::ListType;
        private:
            static constexpr size_t PriorityInheritanceCountMax = 10;
            union SyncObjectBuffer {
-               KSynchronizationObject *sync_objects[ams::svc::ArgumentHandleCountMax];
+               KSynchronizationObject *m_sync_objects[ams::svc::ArgumentHandleCountMax];
-               ams::svc::Handle handles[ams::svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))];
+               ams::svc::Handle m_handles[ams::svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))];

-               constexpr SyncObjectBuffer() : sync_objects() { /* ... */ }
+               constexpr SyncObjectBuffer() : m_sync_objects() { /* ... */ }
            };
-           static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
+           static_assert(sizeof(SyncObjectBuffer::m_sync_objects) == sizeof(SyncObjectBuffer::m_handles));

            struct ConditionVariableComparator {
                struct LightCompareType {
-                   uintptr_t cv_key;
+                   uintptr_t m_cv_key;
-                   s32 priority;
+                   s32 m_priority;

                    constexpr ALWAYS_INLINE uintptr_t GetConditionVariableKey() const {
-                       return this->cv_key;
+                       return m_cv_key;
                    }

                    constexpr ALWAYS_INLINE s32 GetPriority() const {
-                       return this->priority;
+                       return m_priority;
                    }
                };
|
||||||
private:
|
private:
|
||||||
static inline std::atomic<u64> s_next_thread_id = 0;
|
static inline std::atomic<u64> s_next_thread_id = 0;
|
||||||
private:
|
private:
|
||||||
alignas(16) KThreadContext thread_context{};
|
alignas(16) KThreadContext m_thread_context{};
|
||||||
util::IntrusiveListNode process_list_node{};
|
util::IntrusiveListNode m_process_list_node{};
|
||||||
util::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
|
util::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
|
||||||
s32 priority{};
|
s32 m_priority{};
|
||||||
|
|
||||||
using ConditionVariableThreadTreeTraits = util::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&KThread::condvar_arbiter_tree_node>;
|
using ConditionVariableThreadTreeTraits = util::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&KThread::m_condvar_arbiter_tree_node>;
|
||||||
using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
|
using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
|
||||||
|
|
||||||
ConditionVariableThreadTree *condvar_tree{};
|
ConditionVariableThreadTree *m_condvar_tree{};
|
||||||
uintptr_t condvar_key{};
|
uintptr_t m_condvar_key{};
|
||||||
u64 virtual_affinity_mask{};
|
u64 m_virtual_affinity_mask{};
|
||||||
KAffinityMask physical_affinity_mask{};
|
KAffinityMask m_physical_affinity_mask{};
|
||||||
u64 thread_id{};
|
u64 m_thread_id{};
|
||||||
std::atomic<s64> cpu_time{};
|
std::atomic<s64> m_cpu_time{};
|
||||||
KSynchronizationObject *synced_object{};
|
KSynchronizationObject *m_synced_object{};
|
||||||
KProcessAddress address_key{};
|
KProcessAddress m_address_key{};
|
||||||
KProcess *parent{};
|
KProcess *m_parent{};
|
||||||
void *kernel_stack_top{};
|
void *m_kernel_stack_top{};
|
||||||
u32 *light_ipc_data{};
|
u32 *m_light_ipc_data{};
|
||||||
KProcessAddress tls_address{};
|
KProcessAddress m_tls_address{};
|
||||||
void *tls_heap_address{};
|
void *m_tls_heap_address{};
|
||||||
KLightLock activity_pause_lock{};
|
KLightLock m_activity_pause_lock{};
|
||||||
SyncObjectBuffer sync_object_buffer{};
|
SyncObjectBuffer m_sync_object_buffer{};
|
||||||
s64 schedule_count{};
|
s64 m_schedule_count{};
|
||||||
s64 last_scheduled_tick{};
|
s64 m_last_scheduled_tick{};
|
||||||
QueueEntry per_core_priority_queue_entry[cpu::NumCores]{};
|
QueueEntry m_per_core_priority_queue_entry[cpu::NumCores]{};
|
||||||
KLightLock *waiting_lock{};
|
KLightLock *m_waiting_lock{};
|
||||||
|
|
||||||
KThreadQueue *sleeping_queue{};
|
KThreadQueue *m_sleeping_queue{};
|
||||||
|
|
||||||
WaiterList waiter_list{};
|
WaiterList m_waiter_list{};
|
||||||
WaiterList pinned_waiter_list{};
|
WaiterList m_pinned_waiter_list{};
|
||||||
KThread *lock_owner{};
|
KThread *m_lock_owner{};
|
||||||
uintptr_t debug_params[3]{};
|
uintptr_t m_debug_params[3]{};
|
||||||
u32 address_key_value{};
|
u32 m_address_key_value{};
|
||||||
u32 suspend_request_flags{};
|
u32 m_suspend_request_flags{};
|
||||||
u32 suspend_allowed_flags{};
|
u32 m_suspend_allowed_flags{};
|
||||||
Result wait_result;
|
Result m_wait_result;
|
||||||
Result debug_exception_result;
|
Result m_debug_exception_result;
|
||||||
s32 base_priority{};
|
s32 m_base_priority{};
|
||||||
s32 physical_ideal_core_id{};
|
s32 m_physical_ideal_core_id{};
|
||||||
s32 virtual_ideal_core_id{};
|
s32 m_virtual_ideal_core_id{};
|
||||||
s32 num_kernel_waiters{};
|
s32 m_num_kernel_waiters{};
|
||||||
s32 current_core_id{};
|
s32 m_current_core_id{};
|
||||||
s32 core_id{};
|
s32 m_core_id{};
|
||||||
KAffinityMask original_physical_affinity_mask{};
|
KAffinityMask m_original_physical_affinity_mask{};
|
||||||
s32 original_physical_ideal_core_id{};
|
s32 m_original_physical_ideal_core_id{};
|
||||||
s32 num_core_migration_disables{};
|
s32 m_num_core_migration_disables{};
|
||||||
ThreadState thread_state{};
|
ThreadState m_thread_state{};
|
||||||
std::atomic<bool> termination_requested{};
|
std::atomic<bool> m_termination_requested{};
|
||||||
bool wait_cancelled{};
|
bool m_wait_cancelled{};
|
||||||
bool cancellable{};
|
bool m_cancellable{};
|
||||||
bool signaled{};
|
bool m_signaled{};
|
||||||
bool initialized{};
|
bool m_initialized{};
|
||||||
bool debug_attached{};
|
bool m_debug_attached{};
|
||||||
s8 priority_inheritance_count{};
|
s8 m_priority_inheritance_count{};
|
||||||
bool resource_limit_release_hint{};
|
bool m_resource_limit_release_hint{};
|
||||||
public:
|
public:
|
||||||
constexpr KThread() : wait_result(svc::ResultNoSynchronizationObject()), debug_exception_result(ResultSuccess()) { /* ... */ }
|
constexpr KThread() : m_wait_result(svc::ResultNoSynchronizationObject()), m_debug_exception_result(ResultSuccess()) { /* ... */ }
|
||||||
|
|
||||||
virtual ~KThread() { /* ... */ }
|
virtual ~KThread() { /* ... */ }
|
||||||
|
|
||||||
|
@ -240,15 +240,15 @@ namespace ams::kern {
|
||||||
static void ResumeThreadsSuspendedForInit();
|
static void ResumeThreadsSuspendedForInit();
|
||||||
private:
|
private:
|
||||||
StackParameters &GetStackParameters() {
|
StackParameters &GetStackParameters() {
|
||||||
return *(reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1);
|
return *(reinterpret_cast<StackParameters *>(m_kernel_stack_top) - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
const StackParameters &GetStackParameters() const {
|
const StackParameters &GetStackParameters() const {
|
||||||
return *(reinterpret_cast<const StackParameters *>(this->kernel_stack_top) - 1);
|
return *(reinterpret_cast<const StackParameters *>(m_kernel_stack_top) - 1);
|
||||||
}
|
}
|
||||||
public:
|
public:
|
||||||
StackParameters &GetStackParametersForExceptionSvcPermission() {
|
StackParameters &GetStackParametersForExceptionSvcPermission() {
|
||||||
return *(reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1);
|
return *(reinterpret_cast<StackParameters *>(m_kernel_stack_top) - 1);
|
||||||
}
|
}
|
||||||
public:
|
public:
|
||||||
ALWAYS_INLINE s32 GetDisableDispatchCount() const {
|
ALWAYS_INLINE s32 GetDisableDispatchCount() const {
|
||||||
|
@ -272,15 +272,15 @@ namespace ams::kern {
|
||||||
void Unpin();
|
void Unpin();
|
||||||
|
|
||||||
ALWAYS_INLINE void SaveDebugParams(uintptr_t param1, uintptr_t param2, uintptr_t param3) {
|
ALWAYS_INLINE void SaveDebugParams(uintptr_t param1, uintptr_t param2, uintptr_t param3) {
|
||||||
this->debug_params[0] = param1;
|
m_debug_params[0] = param1;
|
||||||
this->debug_params[1] = param2;
|
m_debug_params[1] = param2;
|
||||||
this->debug_params[2] = param3;
|
m_debug_params[2] = param3;
|
||||||
}
|
}
|
||||||
|
|
||||||
ALWAYS_INLINE void RestoreDebugParams(uintptr_t *param1, uintptr_t *param2, uintptr_t *param3) {
|
ALWAYS_INLINE void RestoreDebugParams(uintptr_t *param1, uintptr_t *param2, uintptr_t *param3) {
|
||||||
*param1 = this->debug_params[0];
|
*param1 = m_debug_params[0];
|
||||||
*param2 = this->debug_params[1];
|
*param2 = m_debug_params[1];
|
||||||
*param3 = this->debug_params[2];
|
*param3 = m_debug_params[2];
|
||||||
}
|
}
|
||||||
|
|
||||||
NOINLINE void DisableCoreMigration();
|
NOINLINE void DisableCoreMigration();
|
||||||
|
@ -336,157 +336,157 @@ namespace ams::kern {
|
||||||
void StartTermination();
|
void StartTermination();
|
||||||
void FinishTermination();
|
void FinishTermination();
|
||||||
public:
|
public:
|
||||||
constexpr u64 GetThreadId() const { return this->thread_id; }
|
constexpr u64 GetThreadId() const { return m_thread_id; }
|
||||||
|
|
||||||
constexpr KThreadContext &GetContext() { return this->thread_context; }
|
constexpr KThreadContext &GetContext() { return m_thread_context; }
|
||||||
constexpr const KThreadContext &GetContext() const { return this->thread_context; }
|
constexpr const KThreadContext &GetContext() const { return m_thread_context; }
|
||||||
|
|
||||||
constexpr u64 GetVirtualAffinityMask() const { return this->virtual_affinity_mask; }
|
constexpr u64 GetVirtualAffinityMask() const { return m_virtual_affinity_mask; }
|
||||||
constexpr const KAffinityMask &GetAffinityMask() const { return this->physical_affinity_mask; }
|
constexpr const KAffinityMask &GetAffinityMask() const { return m_physical_affinity_mask; }
|
||||||
|
|
||||||
Result GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
|
Result GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
|
||||||
Result SetCoreMask(int32_t ideal_core, u64 affinity_mask);
|
Result SetCoreMask(int32_t ideal_core, u64 affinity_mask);
|
||||||
|
|
||||||
Result GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
|
Result GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
|
||||||
|
|
||||||
constexpr ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
|
constexpr ThreadState GetState() const { return static_cast<ThreadState>(m_thread_state & ThreadState_Mask); }
|
||||||
constexpr ThreadState GetRawState() const { return this->thread_state; }
|
constexpr ThreadState GetRawState() const { return m_thread_state; }
|
||||||
NOINLINE void SetState(ThreadState state);
|
NOINLINE void SetState(ThreadState state);
|
||||||
|
|
||||||
NOINLINE KThreadContext *GetContextForSchedulerLoop();
|
NOINLINE KThreadContext *GetContextForSchedulerLoop();
|
||||||
|
|
||||||
constexpr uintptr_t GetConditionVariableKey() const { return this->condvar_key; }
|
constexpr uintptr_t GetConditionVariableKey() const { return m_condvar_key; }
|
||||||
constexpr uintptr_t GetAddressArbiterKey() const { return this->condvar_key; }
|
constexpr uintptr_t GetAddressArbiterKey() const { return m_condvar_key; }
|
||||||
|
|
||||||
constexpr void SetConditionVariable(ConditionVariableThreadTree *tree, KProcessAddress address, uintptr_t cv_key, u32 value) {
|
constexpr void SetConditionVariable(ConditionVariableThreadTree *tree, KProcessAddress address, uintptr_t cv_key, u32 value) {
|
||||||
this->condvar_tree = tree;
|
m_condvar_tree = tree;
|
||||||
this->condvar_key = cv_key;
|
m_condvar_key = cv_key;
|
||||||
this->address_key = address;
|
m_address_key = address;
|
||||||
this->address_key_value = value;
|
m_address_key_value = value;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr void ClearConditionVariable() {
|
constexpr void ClearConditionVariable() {
|
||||||
this->condvar_tree = nullptr;
|
m_condvar_tree = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr bool IsWaitingForConditionVariable() const {
|
constexpr bool IsWaitingForConditionVariable() const {
|
||||||
return this->condvar_tree != nullptr;
|
return m_condvar_tree != nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr void SetAddressArbiter(ConditionVariableThreadTree *tree, uintptr_t address) {
|
constexpr void SetAddressArbiter(ConditionVariableThreadTree *tree, uintptr_t address) {
|
||||||
this->condvar_tree = tree;
|
m_condvar_tree = tree;
|
||||||
this->condvar_key = address;
|
m_condvar_key = address;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr void ClearAddressArbiter() {
|
constexpr void ClearAddressArbiter() {
|
||||||
this->condvar_tree = nullptr;
|
m_condvar_tree = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr bool IsWaitingForAddressArbiter() const {
|
constexpr bool IsWaitingForAddressArbiter() const {
|
||||||
return this->condvar_tree != nullptr;
|
return m_condvar_tree != nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr s32 GetIdealVirtualCore() const { return this->virtual_ideal_core_id; }
|
constexpr s32 GetIdealVirtualCore() const { return m_virtual_ideal_core_id; }
|
||||||
constexpr s32 GetIdealPhysicalCore() const { return this->physical_ideal_core_id; }
|
constexpr s32 GetIdealPhysicalCore() const { return m_physical_ideal_core_id; }
|
||||||
|
|
||||||
constexpr s32 GetActiveCore() const { return this->core_id; }
|
constexpr s32 GetActiveCore() const { return m_core_id; }
|
||||||
constexpr void SetActiveCore(s32 core) { this->core_id = core; }
|
constexpr void SetActiveCore(s32 core) { m_core_id = core; }
|
||||||
|
|
||||||
constexpr ALWAYS_INLINE s32 GetCurrentCore() const { return this->current_core_id; }
|
constexpr ALWAYS_INLINE s32 GetCurrentCore() const { return m_current_core_id; }
|
||||||
constexpr void SetCurrentCore(s32 core) { this->current_core_id = core; }
|
constexpr void SetCurrentCore(s32 core) { m_current_core_id = core; }
|
||||||
|
|
||||||
constexpr s32 GetPriority() const { return this->priority; }
|
constexpr s32 GetPriority() const { return m_priority; }
|
||||||
constexpr void SetPriority(s32 prio) { this->priority = prio; }
|
constexpr void SetPriority(s32 prio) { m_priority = prio; }
|
||||||
|
|
||||||
constexpr s32 GetBasePriority() const { return this->base_priority; }
|
constexpr s32 GetBasePriority() const { return m_base_priority; }
|
||||||
|
|
||||||
constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; }
|
constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return m_per_core_priority_queue_entry[core]; }
|
||||||
constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; }
|
constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return m_per_core_priority_queue_entry[core]; }
|
||||||
|
|
||||||
constexpr void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; }
|
constexpr void SetSleepingQueue(KThreadQueue *q) { m_sleeping_queue = q; }
|
||||||
|
|
||||||
constexpr ConditionVariableThreadTree *GetConditionVariableTree() const { return this->condvar_tree; }
|
constexpr ConditionVariableThreadTree *GetConditionVariableTree() const { return m_condvar_tree; }
|
||||||
|
|
||||||
constexpr s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; }
|
constexpr s32 GetNumKernelWaiters() const { return m_num_kernel_waiters; }
|
||||||
|
|
||||||
void AddWaiter(KThread *thread);
|
void AddWaiter(KThread *thread);
|
||||||
void RemoveWaiter(KThread *thread);
|
void RemoveWaiter(KThread *thread);
|
||||||
KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key);
|
KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key);
|
||||||
|
|
||||||
constexpr KProcessAddress GetAddressKey() const { return this->address_key; }
|
constexpr KProcessAddress GetAddressKey() const { return m_address_key; }
|
||||||
constexpr u32 GetAddressKeyValue() const { return this->address_key_value; }
|
constexpr u32 GetAddressKeyValue() const { return m_address_key_value; }
|
||||||
constexpr void SetAddressKey(KProcessAddress key) { this->address_key = key; }
|
constexpr void SetAddressKey(KProcessAddress key) { m_address_key = key; }
|
||||||
constexpr void SetAddressKey(KProcessAddress key, u32 val) { this->address_key = key; this->address_key_value = val; }
|
constexpr void SetAddressKey(KProcessAddress key, u32 val) { m_address_key = key; m_address_key_value = val; }
|
||||||
|
|
||||||
constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; }
|
constexpr void SetLockOwner(KThread *owner) { m_lock_owner = owner; }
|
||||||
constexpr KThread *GetLockOwner() const { return this->lock_owner; }
|
constexpr KThread *GetLockOwner() const { return m_lock_owner; }
|
||||||
|
|
||||||
constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) {
|
constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
|
|
||||||
this->synced_object = obj;
|
m_synced_object = obj;
|
||||||
this->wait_result = wait_res;
|
m_wait_result = wait_res;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr Result GetWaitResult(KSynchronizationObject **out) const {
|
constexpr Result GetWaitResult(KSynchronizationObject **out) const {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
|
|
||||||
*out = this->synced_object;
|
*out = m_synced_object;
|
||||||
return this->wait_result;
|
return m_wait_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr void SetDebugExceptionResult(Result result) {
|
constexpr void SetDebugExceptionResult(Result result) {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
this->debug_exception_result = result;
|
m_debug_exception_result = result;
|
||||||
}
|
}
|
||||||
|
|
||||||
constexpr Result GetDebugExceptionResult() const {
|
constexpr Result GetDebugExceptionResult() const {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
return this->debug_exception_result;
|
return m_debug_exception_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
void WaitCancel();
|
void WaitCancel();
|
||||||
|
|
||||||
bool IsWaitCancelled() const { return this->wait_cancelled; }
|
bool IsWaitCancelled() const { return m_wait_cancelled; }
|
||||||
void ClearWaitCancelled() { this->wait_cancelled = false; }
|
void ClearWaitCancelled() { m_wait_cancelled = false; }
|
||||||
|
|
||||||
void ClearCancellable() { this->cancellable = false; }
|
void ClearCancellable() { m_cancellable = false; }
|
||||||
void SetCancellable() { this->cancellable = true; }
|
void SetCancellable() { m_cancellable = true; }
|
||||||
|
|
||||||
constexpr u32 *GetLightSessionData() const { return this->light_ipc_data; }
|
constexpr u32 *GetLightSessionData() const { return m_light_ipc_data; }
|
||||||
constexpr void SetLightSessionData(u32 *data) { this->light_ipc_data = data; }
|
constexpr void SetLightSessionData(u32 *data) { m_light_ipc_data = data; }
|
||||||
|
|
||||||
bool HasWaiters() const { return !this->waiter_list.empty(); }
|
bool HasWaiters() const { return !m_waiter_list.empty(); }
|
||||||
|
|
||||||
constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; }
|
constexpr s64 GetLastScheduledTick() const { return m_last_scheduled_tick; }
|
||||||
constexpr void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; }
|
constexpr void SetLastScheduledTick(s64 tick) { m_last_scheduled_tick = tick; }
|
||||||
|
|
||||||
constexpr s64 GetYieldScheduleCount() const { return this->schedule_count; }
|
constexpr s64 GetYieldScheduleCount() const { return m_schedule_count; }
|
||||||
constexpr void SetYieldScheduleCount(s64 count) { this->schedule_count = count; }
|
constexpr void SetYieldScheduleCount(s64 count) { m_schedule_count = count; }
|
||||||
|
|
||||||
constexpr KProcess *GetOwnerProcess() const { return this->parent; }
|
constexpr KProcess *GetOwnerProcess() const { return m_parent; }
|
||||||
constexpr bool IsUserThread() const { return this->parent != nullptr; }
|
constexpr bool IsUserThread() const { return m_parent != nullptr; }
|
||||||
|
|
||||||
constexpr KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; }
|
constexpr KProcessAddress GetThreadLocalRegionAddress() const { return m_tls_address; }
|
||||||
constexpr void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; }
|
constexpr void *GetThreadLocalRegionHeapAddress() const { return m_tls_heap_address; }
|
||||||
|
|
||||||
constexpr KSynchronizationObject **GetSynchronizationObjectBuffer() { return std::addressof(this->sync_object_buffer.sync_objects[0]); }
|
constexpr KSynchronizationObject **GetSynchronizationObjectBuffer() { return std::addressof(m_sync_object_buffer.m_sync_objects[0]); }
|
||||||
constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(this->sync_object_buffer.handles[sizeof(this->sync_object_buffer.sync_objects) / sizeof(ams::svc::Handle) - ams::svc::ArgumentHandleCountMax]); }
|
constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(m_sync_object_buffer.m_handles[sizeof(m_sync_object_buffer.m_sync_objects) / sizeof(ams::svc::Handle) - ams::svc::ArgumentHandleCountMax]); }
|
||||||
|
|
||||||
u16 GetUserDisableCount() const { return static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->disable_count; }
|
u16 GetUserDisableCount() const { return static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->disable_count; }
|
||||||
void SetInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 1; }
|
void SetInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->interrupt_flag = 1; }
|
||||||
void ClearInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 0; }
|
void ClearInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->interrupt_flag = 0; }
|
||||||
|
|
||||||
constexpr void SetDebugAttached() { this->debug_attached = true; }
|
constexpr void SetDebugAttached() { m_debug_attached = true; }
|
||||||
constexpr bool IsAttachedToDebugger() const { return this->debug_attached; }
|
constexpr bool IsAttachedToDebugger() const { return m_debug_attached; }
|
||||||
|
|
||||||
void AddCpuTime(s32 core_id, s64 amount) {
|
void AddCpuTime(s32 core_id, s64 amount) {
|
||||||
this->cpu_time += amount;
|
m_cpu_time += amount;
|
||||||
/* TODO: Debug kernels track per-core tick counts. Should we? */
|
/* TODO: Debug kernels track per-core tick counts. Should we? */
|
||||||
MESOSPHERE_UNUSED(core_id);
|
MESOSPHERE_UNUSED(core_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
s64 GetCpuTime() const { return this->cpu_time; }
|
s64 GetCpuTime() const { return m_cpu_time; }
|
||||||
|
|
||||||
s64 GetCpuTime(s32 core_id) const {
|
s64 GetCpuTime(s32 core_id) const {
|
||||||
MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
|
MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
|
||||||
|
@@ -495,10 +495,10 @@ namespace ams::kern {
                 return 0;
             }

-            constexpr u32 GetSuspendFlags() const { return this->suspend_allowed_flags & this->suspend_request_flags; }
+            constexpr u32 GetSuspendFlags() const { return m_suspend_allowed_flags & m_suspend_request_flags; }
             constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; }
-            constexpr bool IsSuspendRequested(SuspendType type) const { return (this->suspend_request_flags & (1u << (ThreadState_SuspendShift + type))) != 0; }
-            constexpr bool IsSuspendRequested() const { return this->suspend_request_flags != 0; }
+            constexpr bool IsSuspendRequested(SuspendType type) const { return (m_suspend_request_flags & (1u << (ThreadState_SuspendShift + type))) != 0; }
+            constexpr bool IsSuspendRequested() const { return m_suspend_request_flags != 0; }
             void RequestSuspend(SuspendType type);
             void Resume(SuspendType type);
             void TrySuspend();
@@ -526,11 +526,11 @@ namespace ams::kern {

             Result Sleep(s64 timeout);

-            ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1; }
-            ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; }
+            ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(m_kernel_stack_top) - 1; }
+            ALWAYS_INLINE void *GetKernelStackTop() const { return m_kernel_stack_top; }

             ALWAYS_INLINE bool IsTerminationRequested() const {
-                return this->termination_requested || this->GetRawState() == ThreadState_Terminated;
+                return m_termination_requested || this->GetRawState() == ThreadState_Terminated;
             }

             size_t GetKernelStackUsage() const;
@@ -538,8 +538,8 @@ namespace ams::kern {
             /* Overridden parent functions. */
             virtual u64 GetId() const override final { return this->GetThreadId(); }

-            virtual bool IsInitialized() const override { return this->initialized; }
-            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->parent) | (this->resource_limit_release_hint ? 1 : 0); }
+            virtual bool IsInitialized() const override { return m_initialized; }
+            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 1 : 0); }

             static void PostDestroy(uintptr_t arg);
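Aside: the GetStackTop/GetStackParameters accessors in the hunks above rely on a common kernel layout trick: a StackParameters block is carved out of the very top of each thread's kernel stack, so the usable stack top sits one StackParameters below the raw top. A minimal standalone sketch of that arithmetic (struct contents and names here are illustrative, not the kernel's actual layout):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the kernel's per-thread StackParameters block.
    struct StackParameters {
        std::uint64_t debug_params[3];
        std::int32_t  disable_count;
        bool          is_pinned;
    };

    int main() {
        // Pretend this 0x1000-byte buffer is a thread's kernel stack.
        alignas(16) static std::uint8_t stack[0x1000];
        void *kernel_stack_top = stack + sizeof(stack);

        // The parameters block lives at the top of the stack, so the usable
        // top for stack frames is one StackParameters below the raw top.
        auto *params    = reinterpret_cast<StackParameters *>(kernel_stack_top) - 1;
        void *stack_top = params;

        std::printf("raw top: %p, usable top: %p\n", kernel_stack_top, stack_top);
        return 0;
    }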
@@ -29,19 +29,19 @@ namespace ams::kern {
             static constexpr size_t RegionsPerPage = PageSize / ams::svc::ThreadLocalRegionSize;
             static_assert(RegionsPerPage > 0);
         private:
-            KProcessAddress virt_addr;
-            KProcess *owner;
-            bool is_region_free[RegionsPerPage];
+            KProcessAddress m_virt_addr;
+            KProcess *m_owner;
+            bool m_is_region_free[RegionsPerPage];
         public:
-            constexpr explicit KThreadLocalPage(KProcessAddress addr) : virt_addr(addr), owner(nullptr), is_region_free() {
+            constexpr explicit KThreadLocalPage(KProcessAddress addr) : m_virt_addr(addr), m_owner(nullptr), m_is_region_free() {
                 for (size_t i = 0; i < RegionsPerPage; i++) {
-                    this->is_region_free[i] = true;
+                    m_is_region_free[i] = true;
                 }
             }

             constexpr explicit KThreadLocalPage() : KThreadLocalPage(Null<KProcessAddress>) { /* ... */ }

-            constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return this->virt_addr; }
+            constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return m_virt_addr; }

             static constexpr ALWAYS_INLINE int Compare(const KThreadLocalPage &lhs, const KThreadLocalPage &rhs) {
                 const KProcessAddress lval = lhs.GetAddress();
@@ -80,7 +80,7 @@ namespace ams::kern {

             bool IsAllUsed() const {
                 for (size_t i = 0; i < RegionsPerPage; i++) {
-                    if (this->is_region_free[i]) {
+                    if (m_is_region_free[i]) {
                         return false;
                     }
                 }
@@ -89,7 +89,7 @@ namespace ams::kern {

             bool IsAllFree() const {
                 for (size_t i = 0; i < RegionsPerPage; i++) {
-                    if (!this->is_region_free[i]) {
+                    if (!m_is_region_free[i]) {
                         return false;
                     }
                 }
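The is_region_free bookkeeping above is a tiny fixed-size free list: one bool per TLS region in the page, scanned linearly. A self-contained sketch of the same reserve/release pattern (class and method names are illustrative, not kernel code):

    #include <array>
    #include <cstddef>
    #include <cstdio>
    #include <optional>

    // Toy allocator over N equally-sized slots, mirroring the one-bool-per-region
    // bookkeeping of a thread-local page.
    template<std::size_t N>
    class SlotPage {
        std::array<bool, N> free_{};
    public:
        SlotPage() { free_.fill(true); }

        std::optional<std::size_t> Reserve() {
            for (std::size_t i = 0; i < N; ++i) {
                if (free_[i]) { free_[i] = false; return i; }
            }
            return std::nullopt;  // page is fully used
        }

        void Release(std::size_t i) { free_[i] = true; }

        bool IsAllUsed() const {
            for (bool f : free_) { if (f) return false; }
            return true;
        }
    };

    int main() {
        SlotPage<4> page;
        auto slot = page.Reserve();
        std::printf("got slot %zu, all used: %d\n", *slot, page.IsAllUsed());
        page.Release(*slot);
        return 0;
    }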
@@ -21,14 +21,14 @@ namespace ams::kern {

     class KThreadQueue {
         private:
-            KThread::WaiterList wait_list;
+            KThread::WaiterList m_wait_list;
         public:
-            constexpr ALWAYS_INLINE KThreadQueue() : wait_list() { /* ... */ }
+            constexpr ALWAYS_INLINE KThreadQueue() : m_wait_list() { /* ... */ }

-            bool IsEmpty() const { return this->wait_list.empty(); }
+            bool IsEmpty() const { return m_wait_list.empty(); }

-            KThread::WaiterList::iterator begin() { return this->wait_list.begin(); }
-            KThread::WaiterList::iterator end() { return this->wait_list.end(); }
+            KThread::WaiterList::iterator begin() { return m_wait_list.begin(); }
+            KThread::WaiterList::iterator end() { return m_wait_list.end(); }

             bool SleepThread(KThread *t) {
                 KScopedSchedulerLock sl;
@@ -43,7 +43,7 @@ namespace ams::kern {
                 t->SetState(KThread::ThreadState_Waiting);

                 /* Add the thread to the queue. */
-                this->wait_list.push_back(*t);
+                m_wait_list.push_back(*t);

                 return true;
             }
@@ -52,7 +52,7 @@ namespace ams::kern {
                 KScopedSchedulerLock sl;

                 /* Remove the thread from the queue. */
-                this->wait_list.erase(this->wait_list.iterator_to(*t));
+                m_wait_list.erase(m_wait_list.iterator_to(*t));

                 /* Mark the thread as no longer sleeping. */
                 t->SetState(KThread::ThreadState_Runnable);
@@ -62,13 +62,13 @@ namespace ams::kern {
             KThread *WakeupFrontThread() {
                 KScopedSchedulerLock sl;

-                if (this->wait_list.empty()) {
+                if (m_wait_list.empty()) {
                     return nullptr;
                 } else {
                     /* Remove the thread from the queue. */
-                    auto it = this->wait_list.begin();
+                    auto it = m_wait_list.begin();
                     KThread *thread = std::addressof(*it);
-                    this->wait_list.erase(it);
+                    m_wait_list.erase(it);

                     MESOSPHERE_ASSERT(thread->GetState() == KThread::ThreadState_Waiting);
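KThreadQueue above is the classic wait-queue shape: a lock-protected intrusive list where sleeping pushes the current thread and waking pops the front. A rough user-space analogue with std::list and a mutex/condvar, purely illustrative and without any of the kernel's scheduler-lock or thread-state semantics:

    #include <condition_variable>
    #include <list>
    #include <mutex>

    // Toy analogue of a kernel wait queue: enqueue waiters, wake the front one.
    class WaitQueue {
        std::mutex mtx_;
        std::condition_variable cv_;
        std::list<int> waiters_;   // the kernel version links KThreads intrusively
        int next_id_ = 0;
        int woken_ = -1;
    public:
        void Sleep() {
            std::unique_lock lk(mtx_);
            const int id = next_id_++;
            waiters_.push_back(id);
            cv_.wait(lk, [&] { return woken_ == id; });  // block until woken
        }

        bool WakeupFront() {
            std::lock_guard lk(mtx_);
            if (waiters_.empty()) {
                return false;
            }
            woken_ = waiters_.front();  // the front waiter gets to run
            waiters_.pop_front();
            cv_.notify_all();
            return true;
        }
    };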
@@ -20,7 +20,7 @@ namespace ams::kern {

     class KTimerTask : public util::IntrusiveRedBlackTreeBaseNode<KTimerTask> {
         private:
-            s64 time;
+            s64 m_time;
         public:
             static constexpr ALWAYS_INLINE int Compare(const KTimerTask &lhs, const KTimerTask &rhs) {
                 if (lhs.GetTime() < rhs.GetTime()) {
@@ -30,14 +30,14 @@ namespace ams::kern {
                 }
             }
         public:
-            constexpr ALWAYS_INLINE KTimerTask() : time(0) { /* ... */ }
+            constexpr ALWAYS_INLINE KTimerTask() : m_time(0) { /* ... */ }

             constexpr ALWAYS_INLINE void SetTime(s64 t) {
-                this->time = t;
+                m_time = t;
             }

             constexpr ALWAYS_INLINE s64 GetTime() const {
-                return this->time;
+                return m_time;
             }

             virtual void OnTimer() = 0;
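KTimerTask orders pending timeouts by absolute time in an intrusive red-black tree, so the earliest deadline is always the leftmost node. A sketch of the same idea using std::multiset in place of the kernel's intrusive tree (names and the firing loop are illustrative only):

    #include <cstdint>
    #include <set>

    // Toy timer queue: tasks ordered by absolute expiry time, earliest first.
    struct TimerTask {
        std::int64_t time;   // absolute deadline, like KTimerTask's m_time
        bool operator<(const TimerTask &rhs) const { return time < rhs.time; }
    };

    int main() {
        std::multiset<TimerTask> queue;
        queue.insert({300});
        queue.insert({100});
        queue.insert({200});

        const std::int64_t now = 150;
        // Fire everything whose deadline has passed, in deadline order.
        while (!queue.empty() && queue.begin()->time <= now) {
            queue.erase(queue.begin());  // OnTimer() would run here
        }
        return 0;   // tasks at t=200 and t=300 remain queued
    }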
@@ -23,15 +23,15 @@ namespace ams::kern {
     class KTransferMemory final : public KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
         private:
-            TYPED_STORAGE(KPageGroup) page_group;
-            KProcess *owner;
-            KProcessAddress address;
-            KLightLock lock;
-            ams::svc::MemoryPermission owner_perm;
-            bool is_initialized;
-            bool is_mapped;
+            TYPED_STORAGE(KPageGroup) m_page_group;
+            KProcess *m_owner;
+            KProcessAddress m_address;
+            KLightLock m_lock;
+            ams::svc::MemoryPermission m_owner_perm;
+            bool m_is_initialized;
+            bool m_is_mapped;
         public:
-            explicit KTransferMemory() : owner(nullptr), address(Null<KProcessAddress>), owner_perm(ams::svc::MemoryPermission_None), is_initialized(false), is_mapped(false) {
+            explicit KTransferMemory() : m_owner(nullptr), m_address(Null<KProcessAddress>), m_owner_perm(ams::svc::MemoryPermission_None), m_is_initialized(false), m_is_mapped(false) {
                 /* ... */
             }

@@ -40,16 +40,16 @@ namespace ams::kern {
             Result Initialize(KProcessAddress addr, size_t size, ams::svc::MemoryPermission own_perm);
             virtual void Finalize() override;

-            virtual bool IsInitialized() const override { return this->is_initialized; }
-            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->owner); }
+            virtual bool IsInitialized() const override { return m_is_initialized; }
+            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_owner); }
             static void PostDestroy(uintptr_t arg);

             Result Map(KProcessAddress address, size_t size, ams::svc::MemoryPermission map_perm);
             Result Unmap(KProcessAddress address, size_t size);

-            KProcess *GetOwner() const { return this->owner; }
-            KProcessAddress GetSourceAddress() { return this->address; }
-            size_t GetSize() const { return this->is_initialized ? GetReference(this->page_group).GetNumPages() * PageSize : 0; }
+            KProcess *GetOwner() const { return m_owner; }
+            KProcessAddress GetSourceAddress() { return m_address; }
+            size_t GetSize() const { return m_is_initialized ? GetReference(m_page_group).GetNumPages() * PageSize : 0; }
     };

 }
@@ -23,13 +23,13 @@ namespace ams::kern {
     template<bool Virtual, typename T>
     class KTypedAddress {
         private:
-            uintptr_t address;
+            uintptr_t m_address;
         public:
             /* Constructors. */
-            constexpr ALWAYS_INLINE KTypedAddress() : address(0) { /* ... */ }
-            constexpr ALWAYS_INLINE KTypedAddress(uintptr_t a) : address(a) { /* ... */ }
+            constexpr ALWAYS_INLINE KTypedAddress() : m_address(0) { /* ... */ }
+            constexpr ALWAYS_INLINE KTypedAddress(uintptr_t a) : m_address(a) { /* ... */ }
             template<typename U>
-            constexpr ALWAYS_INLINE explicit KTypedAddress(U *ptr) : address(reinterpret_cast<uintptr_t>(ptr)) { /* ... */ }
+            constexpr ALWAYS_INLINE explicit KTypedAddress(U *ptr) : m_address(reinterpret_cast<uintptr_t>(ptr)) { /* ... */ }

             /* Copy constructor. */
             constexpr ALWAYS_INLINE KTypedAddress(const KTypedAddress &rhs) = default;
@@ -41,92 +41,92 @@ namespace ams::kern {
             template<typename I>
             constexpr ALWAYS_INLINE KTypedAddress operator+(I rhs) const {
                 static_assert(std::is_integral<I>::value);
-                return this->address + rhs;
+                return m_address + rhs;
             }

             template<typename I>
             constexpr ALWAYS_INLINE KTypedAddress operator-(I rhs) const {
                 static_assert(std::is_integral<I>::value);
-                return this->address - rhs;
+                return m_address - rhs;
             }

             constexpr ALWAYS_INLINE ptrdiff_t operator-(KTypedAddress rhs) const {
-                return this->address - rhs.address;
+                return m_address - rhs.m_address;
             }

             template<typename I>
             constexpr ALWAYS_INLINE KTypedAddress operator+=(I rhs) {
                 static_assert(std::is_integral<I>::value);
-                this->address += rhs;
+                m_address += rhs;
                 return *this;
             }

             template<typename I>
             constexpr ALWAYS_INLINE KTypedAddress operator-=(I rhs) {
                 static_assert(std::is_integral<I>::value);
-                this->address -= rhs;
+                m_address -= rhs;
                 return *this;
             }

             /* Logical operators. */
             constexpr ALWAYS_INLINE uintptr_t operator&(uintptr_t mask) const {
-                return this->address & mask;
+                return m_address & mask;
             }

             constexpr ALWAYS_INLINE uintptr_t operator|(uintptr_t mask) const {
-                return this->address | mask;
+                return m_address | mask;
             }

             constexpr ALWAYS_INLINE uintptr_t operator<<(int shift) const {
-                return this->address << shift;
+                return m_address << shift;
             }

             constexpr ALWAYS_INLINE uintptr_t operator>>(int shift) const {
-                return this->address >> shift;
+                return m_address >> shift;
             }

             template<typename U>
-            constexpr ALWAYS_INLINE size_t operator/(U size) const { return this->address / size; }
+            constexpr ALWAYS_INLINE size_t operator/(U size) const { return m_address / size; }

-            /* constexpr ALWAYS_INLINE uintptr_t operator%(U align) const { return this->address % align; } */
+            /* constexpr ALWAYS_INLINE uintptr_t operator%(U align) const { return m_address % align; } */

             /* Comparison operators. */
             constexpr ALWAYS_INLINE bool operator==(KTypedAddress rhs) const {
-                return this->address == rhs.address;
+                return m_address == rhs.m_address;
             }

             constexpr ALWAYS_INLINE bool operator!=(KTypedAddress rhs) const {
-                return this->address != rhs.address;
+                return m_address != rhs.m_address;
             }

             constexpr ALWAYS_INLINE bool operator<(KTypedAddress rhs) const {
-                return this->address < rhs.address;
+                return m_address < rhs.m_address;
             }

             constexpr ALWAYS_INLINE bool operator<=(KTypedAddress rhs) const {
-                return this->address <= rhs.address;
+                return m_address <= rhs.m_address;
             }

             constexpr ALWAYS_INLINE bool operator>(KTypedAddress rhs) const {
-                return this->address > rhs.address;
+                return m_address > rhs.m_address;
             }

             constexpr ALWAYS_INLINE bool operator>=(KTypedAddress rhs) const {
-                return this->address >= rhs.address;
+                return m_address >= rhs.m_address;
             }

             /* For convenience, also define comparison operators versus uintptr_t. */
             constexpr ALWAYS_INLINE bool operator==(uintptr_t rhs) const {
-                return this->address == rhs;
+                return m_address == rhs;
             }

             constexpr ALWAYS_INLINE bool operator!=(uintptr_t rhs) const {
-                return this->address != rhs;
+                return m_address != rhs;
             }

             /* Allow getting the address explicitly, for use in accessors. */
             constexpr ALWAYS_INLINE uintptr_t GetValue() const {
-                return this->address;
+                return m_address;
             }

     };
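KTypedAddress is a strong typedef over uintptr_t: physical and virtual addresses become distinct types, so mixing them is a compile error while arithmetic and comparisons still work. A minimal sketch of the pattern (heavily simplified; the real class template has far more operators and a Virtual flag):

    #include <cstdint>

    // Tag-parameterized address wrapper: PAddr and VAddr cannot be mixed up.
    template<typename Tag>
    class TypedAddress {
        std::uintptr_t m_address;
    public:
        constexpr TypedAddress() : m_address(0) {}
        constexpr TypedAddress(std::uintptr_t a) : m_address(a) {}

        constexpr TypedAddress operator+(std::ptrdiff_t d) const { return m_address + d; }
        constexpr bool operator==(TypedAddress rhs) const { return m_address == rhs.m_address; }
        constexpr std::uintptr_t GetValue() const { return m_address; }
    };

    struct PhysicalTag;
    struct VirtualTag;
    using PAddr = TypedAddress<PhysicalTag>;
    using VAddr = TypedAddress<VirtualTag>;

    static_assert(PAddr(0x1000).GetValue() + 0x20 == 0x1020);

    int main() {
        PAddr p(0x40000000);
        VAddr v(0xFFFF0000);
        p = p + 0x10;   // fine: arithmetic within one address space
        // p == v;      // would not compile: distinct types by design
        return static_cast<int>(v.GetValue() & 0xF);
    }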
@@ -21,57 +21,57 @@ namespace ams::kern {

     class KUnsafeMemory {
         private:
-            mutable KLightLock lock;
-            size_t limit_size;
-            size_t current_size;
+            mutable KLightLock m_lock;
+            size_t m_limit_size;
+            size_t m_current_size;
         public:
-            constexpr KUnsafeMemory() : lock(), limit_size(), current_size() { /* ... */ }
+            constexpr KUnsafeMemory() : m_lock(), m_limit_size(), m_current_size() { /* ... */ }

             bool TryReserve(size_t size) {
                 MESOSPHERE_ASSERT_THIS();
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);

                 /* Test for overflow. */
-                if (this->current_size > this->current_size + size) {
+                if (m_current_size > m_current_size + size) {
                     return false;
                 }

                 /* Test for limit allowance. */
-                if (this->current_size + size > this->limit_size) {
+                if (m_current_size + size > m_limit_size) {
                     return false;
                 }

                 /* Reserve the size. */
-                this->current_size += size;
+                m_current_size += size;
                 return true;
             }

             void Release(size_t size) {
                 MESOSPHERE_ASSERT_THIS();
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);

-                MESOSPHERE_ABORT_UNLESS(this->current_size >= size);
-                this->current_size -= size;
+                MESOSPHERE_ABORT_UNLESS(m_current_size >= size);
+                m_current_size -= size;
             }

             size_t GetLimitSize() const {
                 MESOSPHERE_ASSERT_THIS();
-                KScopedLightLock lk(this->lock);
-                return this->limit_size;
+                KScopedLightLock lk(m_lock);
+                return m_limit_size;
             }

             size_t GetCurrentSize() const {
                 MESOSPHERE_ASSERT_THIS();
-                KScopedLightLock lk(this->lock);
-                return this->current_size;
+                KScopedLightLock lk(m_lock);
+                return m_current_size;
             }

             Result SetLimitSize(size_t size) {
                 MESOSPHERE_ASSERT_THIS();
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);

-                R_UNLESS(size >= this->current_size, svc::ResultLimitReached());
-                this->limit_size = size;
+                R_UNLESS(size >= m_current_size, svc::ResultLimitReached());
+                m_limit_size = size;
                 return ResultSuccess();
             }
     };
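The overflow test in TryReserve relies on unsigned wraparound: if current + size wraps past SIZE_MAX, the sum becomes smaller than current, so `current > current + size` detects the overflow without needing a wider integer type. A standalone illustration of the same check, minus the locking:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Returns true only if cur + extra neither wraps around nor exceeds limit,
    // following the same shape as KUnsafeMemory::TryReserve.
    bool CanReserve(std::size_t cur, std::size_t extra, std::size_t limit) {
        if (cur > cur + extra) {        // unsigned wraparound => overflow
            return false;
        }
        return cur + extra <= limit;
    }

    int main() {
        std::printf("%d\n", CanReserve(100, 50, 200));               // 1: fits
        std::printf("%d\n", CanReserve(100, 200, 200));              // 0: over limit
        std::printf("%d\n", CanReserve(SIZE_MAX - 1, 10, SIZE_MAX)); // 0: wraps
        return 0;
    }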
@@ -22,10 +22,10 @@ namespace ams::kern {

     class KWaitObject : public KTimerTask {
         private:
-            KThread::WaiterList wait_list;
-            bool timer_used;
+            KThread::WaiterList m_wait_list;
+            bool m_timer_used;
         public:
-            constexpr KWaitObject() : wait_list(), timer_used() { /* ... */ }
+            constexpr KWaitObject() : m_wait_list(), m_timer_used() { /* ... */ }

             virtual void OnTimer() override;
             Result Synchronize(s64 timeout);
@@ -20,12 +20,12 @@ namespace ams::kern {

     class KWorkerTask {
         private:
-            KWorkerTask *next_task;
+            KWorkerTask *m_next_task;
         public:
-            constexpr ALWAYS_INLINE KWorkerTask() : next_task(nullptr) { /* ... */ }
+            constexpr ALWAYS_INLINE KWorkerTask() : m_next_task(nullptr) { /* ... */ }

-            constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return this->next_task; }
-            constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { this->next_task = task; }
+            constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return m_next_task; }
+            constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { m_next_task = task; }

             virtual void DoWorkerTask() = 0;
     };

@@ -30,11 +30,11 @@ namespace ams::kern {
             WorkerType_Count,
         };
         private:
-            KWorkerTask *head_task;
-            KWorkerTask *tail_task;
-            KThread *thread;
-            WorkerType type;
-            bool active;
+            KWorkerTask *m_head_task;
+            KWorkerTask *m_tail_task;
+            KThread *m_thread;
+            WorkerType m_type;
+            bool m_active;
         private:
             static void ThreadFunction(uintptr_t arg);
             void ThreadFunctionImpl();
@@ -42,7 +42,7 @@ namespace ams::kern {
             KWorkerTask *GetTask();
             void AddTask(KWorkerTask *task);
         public:
-            constexpr KWorkerTaskManager() : head_task(), tail_task(), thread(), type(WorkerType_Count), active() { /* ... */ }
+            constexpr KWorkerTaskManager() : m_head_task(), m_tail_task(), m_thread(), m_type(WorkerType_Count), m_active() { /* ... */ }

             NOINLINE void Initialize(WorkerType wt, s32 priority);
             static void AddTask(WorkerType type, KWorkerTask *task);
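KWorkerTaskManager keeps an intrusive FIFO: each task carries its own next pointer, and the manager tracks head and tail so enqueue and dequeue are O(1) with no allocation. A compact sketch of that queue discipline (names are illustrative):

    #include <cassert>

    // Intrusive FIFO: the link lives inside the task itself.
    struct Task {
        Task *next = nullptr;
        int id = 0;
    };

    class TaskQueue {
        Task *head_ = nullptr;
        Task *tail_ = nullptr;
    public:
        void Push(Task *t) {           // O(1): append at the tail
            t->next = nullptr;
            if (tail_ != nullptr) {
                tail_->next = t;
            } else {
                head_ = t;
            }
            tail_ = t;
        }

        Task *Pop() {                  // O(1): take from the head
            Task *t = head_;
            if (t != nullptr) {
                head_ = t->next;
                if (head_ == nullptr) {
                    tail_ = nullptr;
                }
            }
            return t;
        }
    };

    int main() {
        Task a; a.id = 1;
        Task b; b.id = 2;
        TaskQueue q;
        q.Push(&a);
        q.Push(&b);
        assert(q.Pop()->id == 1 && q.Pop()->id == 2 && q.Pop() == nullptr);
        return 0;
    }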
@@ -25,9 +25,9 @@ namespace ams::kern {
     class KWritableEvent final : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject);
         private:
-            KEvent *parent;
+            KEvent *m_parent;
         public:
-            constexpr explicit KWritableEvent() : parent(nullptr) { /* ... */ }
+            constexpr explicit KWritableEvent() : m_parent(nullptr) { /* ... */ }
             virtual ~KWritableEvent() { /* ... */ }

             virtual void Destroy() override;
@@ -38,7 +38,7 @@ namespace ams::kern {
             Result Signal();
             Result Clear();

-            constexpr KEvent *GetParent() const { return this->parent; }
+            constexpr KEvent *GetParent() const { return m_parent; }
     };

 }
@@ -39,20 +39,20 @@ namespace ams::kern {
         NON_COPYABLE(KScopedInterruptDisable);
         NON_MOVEABLE(KScopedInterruptDisable);
         private:
-            u32 prev_intr_state;
+            u32 m_prev_intr_state;
         public:
-            ALWAYS_INLINE KScopedInterruptDisable() : prev_intr_state(KInterruptManager::DisableInterrupts()) { /* ... */ }
-            ALWAYS_INLINE ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(prev_intr_state); }
+            ALWAYS_INLINE KScopedInterruptDisable() : m_prev_intr_state(KInterruptManager::DisableInterrupts()) { /* ... */ }
+            ALWAYS_INLINE ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(m_prev_intr_state); }
     };

     class KScopedInterruptEnable {
         NON_COPYABLE(KScopedInterruptEnable);
         NON_MOVEABLE(KScopedInterruptEnable);
         private:
-            u32 prev_intr_state;
+            u32 m_prev_intr_state;
         public:
-            ALWAYS_INLINE KScopedInterruptEnable() : prev_intr_state(KInterruptManager::EnableInterrupts()) { /* ... */ }
-            ALWAYS_INLINE ~KScopedInterruptEnable() { KInterruptManager::RestoreInterrupts(prev_intr_state); }
+            ALWAYS_INLINE KScopedInterruptEnable() : m_prev_intr_state(KInterruptManager::EnableInterrupts()) { /* ... */ }
+            ALWAYS_INLINE ~KScopedInterruptEnable() { KInterruptManager::RestoreInterrupts(m_prev_intr_state); }
     };

 }
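Both guards above are the save-in-constructor, restore-in-destructor RAII idiom: the previous interrupt state is captured when the scope opens and unconditionally restored when it closes, even on early return or exception. A user-space sketch of the same shape, with a toy global flag standing in for real interrupt masking:

    #include <cstdio>

    // Toy stand-in for the CPU interrupt state the kernel guards manage.
    static bool g_enabled = true;

    static bool DisableState() { bool prev = g_enabled; g_enabled = false; return prev; }
    static void RestoreState(bool prev) { g_enabled = prev; }

    // Save-on-enter / restore-on-exit guard, mirroring KScopedInterruptDisable.
    class ScopedDisable {
        bool m_prev_state;
    public:
        ScopedDisable() : m_prev_state(DisableState()) {}
        ~ScopedDisable() { RestoreState(m_prev_state); }
        ScopedDisable(const ScopedDisable &) = delete;            // NON_COPYABLE
        ScopedDisable &operator=(const ScopedDisable &) = delete;
    };

    int main() {
        {
            ScopedDisable sd;                          // state saved, then disabled
            std::printf("inside: %d\n", g_enabled);    // prints 0
        }                                              // destructor restores
        std::printf("after: %d\n", g_enabled);         // prints 1
        return 0;
    }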
@@ -108,28 +108,28 @@ namespace ams::kern::svc {
             using CT = typename std::remove_pointer<_T>::type;
             using T = typename std::remove_const<CT>::type;
         private:
-            CT *ptr;
+            CT *m_ptr;
         private:
             Result CopyToImpl(void *p, size_t size) const {
-                return Traits::CopyFromUserspace(p, this->ptr, size);
+                return Traits::CopyFromUserspace(p, m_ptr, size);
             }

             Result CopyFromImpl(const void *p, size_t size) const {
-                return Traits::CopyToUserspace(this->ptr, p, size);
+                return Traits::CopyToUserspace(m_ptr, p, size);
             }
         protected:
             Result CopyTo(T *p) const { return this->CopyToImpl(p, sizeof(*p)); }
             Result CopyFrom(const T *p) const { return this->CopyFromImpl(p, sizeof(*p)); }

-            Result CopyArrayElementTo(T *p, size_t index) const { return Traits::CopyFromUserspace(p, this->ptr + index, sizeof(*p)); }
-            Result CopyArrayElementFrom(const T *p, size_t index) const { return Traits::CopyToUserspace(this->ptr + index, p, sizeof(*p)); }
+            Result CopyArrayElementTo(T *p, size_t index) const { return Traits::CopyFromUserspace(p, m_ptr + index, sizeof(*p)); }
+            Result CopyArrayElementFrom(const T *p, size_t index) const { return Traits::CopyToUserspace(m_ptr + index, p, sizeof(*p)); }

             Result CopyArrayTo(T *arr, size_t count) const { return this->CopyToImpl(arr, sizeof(*arr) * count); }
             Result CopyArrayFrom(const T *arr, size_t count) const { return this->CopyFromImpl(arr, sizeof(*arr) * count); }

-            constexpr bool IsNull() const { return this->ptr == nullptr; }
+            constexpr bool IsNull() const { return m_ptr == nullptr; }

-            constexpr CT *GetUnsafePointer() const { return this->ptr; }
+            constexpr CT *GetUnsafePointer() const { return m_ptr; }
     };

     template<>
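Note the discipline in the wrapper above: the userspace pointer is never dereferenced directly; every access funnels through a checked copy routine, and element access is expressed as pointer arithmetic plus a sizeof-bounded copy. A simplified sketch of the same pattern, with memcpy standing in for the real fault-tolerant userspace copy (illustrative only; the kernel's version validates address ranges and recovers from faults):

    #include <cstddef>
    #include <cstring>

    // Stand-in for the kernel's checked userspace copy primitive.
    static bool CopyFromUserspace(void *dst, const void *user_src, std::size_t size) {
        std::memcpy(dst, user_src, size);   // pretend this is range-checked
        return true;
    }

    template<typename T>
    class UserPointer {
        const T *m_ptr;                     // never dereferenced directly
    public:
        explicit UserPointer(const T *p) : m_ptr(p) {}

        bool CopyTo(T *out) const { return CopyFromUserspace(out, m_ptr, sizeof(*out)); }
        bool CopyArrayElementTo(T *out, std::size_t i) const {
            return CopyFromUserspace(out, m_ptr + i, sizeof(*out));
        }
        bool IsNull() const { return m_ptr == nullptr; }
    };

    int main() {
        int user_buf[3] = {10, 20, 30};     // pretend this lives in userspace
        UserPointer<int> up(user_buf);
        int v = 0;
        return up.CopyArrayElementTo(&v, 2) && v == 30 ? 0 : 1;
    }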
@@ -18,47 +18,47 @@
 namespace ams::kern::arch::arm {

     void KInterruptController::SetupInterruptLines(s32 core_id) const {
-        const size_t ITLines = (core_id == 0) ? 32 * ((this->gicd->typer & 0x1F) + 1) : NumLocalInterrupts;
+        const size_t ITLines = (core_id == 0) ? 32 * ((m_gicd->typer & 0x1F) + 1) : NumLocalInterrupts;

         for (size_t i = 0; i < ITLines / 32; i++) {
-            this->gicd->icenabler[i] = 0xFFFFFFFF;
-            this->gicd->icpendr[i] = 0xFFFFFFFF;
-            this->gicd->icactiver[i] = 0xFFFFFFFF;
-            this->gicd->igroupr[i] = 0;
+            m_gicd->icenabler[i] = 0xFFFFFFFF;
+            m_gicd->icpendr[i] = 0xFFFFFFFF;
+            m_gicd->icactiver[i] = 0xFFFFFFFF;
+            m_gicd->igroupr[i] = 0;
         }

         for (size_t i = 0; i < ITLines; i++) {
-            this->gicd->ipriorityr.bytes[i] = 0xFF;
-            this->gicd->itargetsr.bytes[i] = 0x00;
+            m_gicd->ipriorityr.bytes[i] = 0xFF;
+            m_gicd->itargetsr.bytes[i] = 0x00;
         }

         for (size_t i = 0; i < ITLines / 16; i++) {
-            this->gicd->icfgr[i] = 0x00000000;
+            m_gicd->icfgr[i] = 0x00000000;
         }
     }

     void KInterruptController::Initialize(s32 core_id) {
         /* Setup pointers to ARM mmio. */
-        this->gicd = GetPointer<volatile GicDistributor >(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptDistributor));
-        this->gicc = GetPointer<volatile GicCpuInterface>(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptCpuInterface));
+        m_gicd = GetPointer<volatile GicDistributor >(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptDistributor));
+        m_gicc = GetPointer<volatile GicCpuInterface>(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptCpuInterface));

         /* Clear CTLRs. */
-        this->gicc->ctlr = 0;
+        m_gicc->ctlr = 0;
         if (core_id == 0) {
-            this->gicd->ctlr = 0;
+            m_gicd->ctlr = 0;
         }

-        this->gicc->pmr = 0;
-        this->gicc->bpr = 7;
+        m_gicc->pmr = 0;
+        m_gicc->bpr = 7;

         /* Setup all interrupt lines. */
         SetupInterruptLines(core_id);

         /* Set CTLRs. */
         if (core_id == 0) {
-            this->gicd->ctlr = 1;
+            m_gicd->ctlr = 1;
         }
-        this->gicc->ctlr = 1;
+        m_gicc->ctlr = 1;

         /* Set the mask for this core. */
         SetGicMask(core_id);
@@ -70,9 +70,9 @@ namespace ams::kern::arch::arm {
     void KInterruptController::Finalize(s32 core_id) {
         /* Clear CTLRs. */
         if (core_id == 0) {
-            this->gicd->ctlr = 0;
+            m_gicd->ctlr = 0;
         }
-        this->gicc->ctlr = 0;
+        m_gicc->ctlr = 0;

         /* Set the priority level. */
         SetPriorityLevel(PriorityLevel_High);
@@ -85,27 +85,27 @@ namespace ams::kern::arch::arm {
         /* Save isenabler. */
         for (size_t i = 0; i < util::size(state->isenabler); ++i) {
             constexpr size_t Offset = 0;
-            state->isenabler[i] = this->gicd->isenabler[i + Offset];
-            this->gicd->isenabler[i + Offset] = 0xFFFFFFFF;
+            state->isenabler[i] = m_gicd->isenabler[i + Offset];
+            m_gicd->isenabler[i + Offset] = 0xFFFFFFFF;
         }

         /* Save ipriorityr. */
         for (size_t i = 0; i < util::size(state->ipriorityr); ++i) {
             constexpr size_t Offset = 0;
-            state->ipriorityr[i] = this->gicd->ipriorityr.words[i + Offset];
-            this->gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF;
+            state->ipriorityr[i] = m_gicd->ipriorityr.words[i + Offset];
+            m_gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF;
         }

         /* Save itargetsr. */
         for (size_t i = 0; i < util::size(state->itargetsr); ++i) {
             constexpr size_t Offset = 0;
-            state->itargetsr[i] = this->gicd->itargetsr.words[i + Offset];
+            state->itargetsr[i] = m_gicd->itargetsr.words[i + Offset];
         }

         /* Save icfgr. */
         for (size_t i = 0; i < util::size(state->icfgr); ++i) {
             constexpr size_t Offset = 0;
-            state->icfgr[i] = this->gicd->icfgr[i + Offset];
+            state->icfgr[i] = m_gicd->icfgr[i + Offset];
         }
     }
@@ -113,27 +113,27 @@ namespace ams::kern::arch::arm {
         /* Save isenabler. */
         for (size_t i = 0; i < util::size(state->isenabler); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.isenabler);
-            state->isenabler[i] = this->gicd->isenabler[i + Offset];
-            this->gicd->isenabler[i + Offset] = 0xFFFFFFFF;
+            state->isenabler[i] = m_gicd->isenabler[i + Offset];
+            m_gicd->isenabler[i + Offset] = 0xFFFFFFFF;
         }

         /* Save ipriorityr. */
         for (size_t i = 0; i < util::size(state->ipriorityr); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.ipriorityr);
-            state->ipriorityr[i] = this->gicd->ipriorityr.words[i + Offset];
-            this->gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF;
+            state->ipriorityr[i] = m_gicd->ipriorityr.words[i + Offset];
+            m_gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF;
         }

         /* Save itargetsr. */
         for (size_t i = 0; i < util::size(state->itargetsr); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.itargetsr);
-            state->itargetsr[i] = this->gicd->itargetsr.words[i + Offset];
+            state->itargetsr[i] = m_gicd->itargetsr.words[i + Offset];
         }

         /* Save icfgr. */
         for (size_t i = 0; i < util::size(state->icfgr); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.icfgr);
-            state->icfgr[i] = this->gicd->icfgr[i + Offset];
+            state->icfgr[i] = m_gicd->icfgr[i + Offset];
         }
     }
@@ -141,26 +141,26 @@ namespace ams::kern::arch::arm {
         /* Restore ipriorityr. */
         for (size_t i = 0; i < util::size(state->ipriorityr); ++i) {
             constexpr size_t Offset = 0;
-            this->gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i];
+            m_gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i];
         }

         /* Restore itargetsr. */
         for (size_t i = 0; i < util::size(state->itargetsr); ++i) {
             constexpr size_t Offset = 0;
-            this->gicd->itargetsr.words[i + Offset] = state->itargetsr[i];
+            m_gicd->itargetsr.words[i + Offset] = state->itargetsr[i];
         }

         /* Restore icfgr. */
         for (size_t i = 0; i < util::size(state->icfgr); ++i) {
             constexpr size_t Offset = 0;
-            this->gicd->icfgr[i + Offset] = state->icfgr[i];
+            m_gicd->icfgr[i + Offset] = state->icfgr[i];
         }

         /* Restore isenabler. */
         for (size_t i = 0; i < util::size(state->isenabler); ++i) {
             constexpr size_t Offset = 0;
-            this->gicd->icenabler[i + Offset] = 0xFFFFFFFF;
-            this->gicd->isenabler[i + Offset] = state->isenabler[i];
+            m_gicd->icenabler[i + Offset] = 0xFFFFFFFF;
+            m_gicd->isenabler[i + Offset] = state->isenabler[i];
         }
     }
@@ -168,26 +168,26 @@ namespace ams::kern::arch::arm {
         /* Restore ipriorityr. */
         for (size_t i = 0; i < util::size(state->ipriorityr); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.ipriorityr);
-            this->gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i];
+            m_gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i];
         }

         /* Restore itargetsr. */
         for (size_t i = 0; i < util::size(state->itargetsr); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.itargetsr);
-            this->gicd->itargetsr.words[i + Offset] = state->itargetsr[i];
+            m_gicd->itargetsr.words[i + Offset] = state->itargetsr[i];
         }

         /* Restore icfgr. */
         for (size_t i = 0; i < util::size(state->icfgr); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.icfgr);
-            this->gicd->icfgr[i + Offset] = state->icfgr[i];
+            m_gicd->icfgr[i + Offset] = state->icfgr[i];
         }

         /* Restore isenabler. */
         for (size_t i = 0; i < util::size(state->isenabler); ++i) {
             constexpr size_t Offset = util::size(LocalState{}.isenabler);
-            this->gicd->icenabler[i + Offset] = 0xFFFFFFFF;
-            this->gicd->isenabler[i + Offset] = state->isenabler[i];
+            m_gicd->icenabler[i + Offset] = 0xFFFFFFFF;
+            m_gicd->isenabler[i + Offset] = state->isenabler[i];
         }
     }
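The per-interrupt registers touched above are banked: a 32-bit enable register covers 32 interrupt lines, so line irq maps to word irq / 32 and bit irq % 32, while priority and target bytes are indexed directly by irq. A standalone sketch of that indexing, with a plain array in place of MMIO (the real isenabler/icenabler registers are write-one-to-set and write-one-to-clear, which this toy glosses over):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Toy model of the GIC distributor's enable bitmap: one bit per line.
    constexpr std::size_t NumLines = 96;
    static std::uint32_t g_isenabler[NumLines / 32] = {};

    static void EnableLine(unsigned irq) {
        g_isenabler[irq / 32] |= (1u << (irq % 32));   // word index, bit index
    }

    static bool IsEnabled(unsigned irq) {
        return (g_isenabler[irq / 32] >> (irq % 32)) & 1u;
    }

    int main() {
        EnableLine(5);
        EnableLine(41);   // lands in word 1, bit 9
        std::printf("%d %d %d\n", IsEnabled(5), IsEnabled(41), IsEnabled(6));
        return 0;
    }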
@@ -46,38 +46,38 @@ namespace ams::kern::arch::arm64::cpu {
         private:
             static inline KLightLock s_lock;
         private:
-            u64 counter;
-            s32 which;
-            bool done;
+            u64 m_counter;
+            s32 m_which;
+            bool m_done;
         public:
-            constexpr KPerformanceCounterInterruptHandler() : KInterruptHandler(), counter(), which(), done() { /* ... */ }
+            constexpr KPerformanceCounterInterruptHandler() : KInterruptHandler(), m_counter(), m_which(), m_done() { /* ... */ }

             static KLightLock &GetLock() { return s_lock; }

             void Setup(s32 w) {
-                this->done = false;
-                this->which = w;
+                m_done = false;
+                m_which = w;
             }

             void Wait() {
-                while (!this->done) {
+                while (!m_done) {
                     cpu::Yield();
                 }
             }

-            u64 GetCounter() const { return this->counter; }
+            u64 GetCounter() const { return m_counter; }

             /* Nintendo misuses this per their own API, but it's functional. */
             virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
                 MESOSPHERE_UNUSED(interrupt_id);

-                if (this->which < 0) {
-                    this->counter = cpu::GetCycleCounter();
+                if (m_which < 0) {
+                    m_counter = cpu::GetCycleCounter();
                 } else {
-                    this->counter = cpu::GetPerformanceCounter(this->which);
+                    m_counter = cpu::GetPerformanceCounter(m_which);
                 }
                 DataMemoryBarrier();
-                this->done = true;
+                m_done = true;
                 return nullptr;
             }
     };
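KPerformanceCounterInterruptHandler reads a performance counter on another core: the requester calls `Setup`, sends that core an interrupt, and spins in `Wait`; the target core's `OnInterrupt` samples the counter, issues `DataMemoryBarrier`, and only then sets the done flag, so the requester can never observe a stale counter value. A stripped-down sketch of the same handshake, using `std::atomic` release/acquire in place of the kernel's explicit barrier (the primitives `ReadSomeCounter` and `Yield` are hypothetical stand-ins):

    #include <atomic>
    #include <cstdint>

    /* Hypothetical stand-ins for the kernel's primitives. */
    uint64_t ReadSomeCounter();
    void     Yield();

    class RemoteCounterReader {
        private:
            std::atomic<uint64_t> m_counter{0};
            std::atomic<bool> m_done{false};
        public:
            void Setup() { m_done.store(false, std::memory_order_relaxed); }

            /* Runs on the requesting core after it sends the interrupt. */
            uint64_t Wait() {
                while (!m_done.load(std::memory_order_acquire)) { Yield(); }
                return m_counter.load(std::memory_order_relaxed);
            }

            /* Runs on the target core, in interrupt context. */
            void OnInterrupt() {
                m_counter.store(ReadSomeCounter(), std::memory_order_relaxed);
                m_done.store(true, std::memory_order_release);  /* publishes m_counter */
            }
    };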
@@ -93,11 +93,11 @@ namespace ams::kern::arch::arm64::cpu {
                 FlushDataCache,
             };
         private:
-            KLightLock lock;
-            KLightLock cv_lock;
-            KLightConditionVariable cv;
-            std::atomic<u64> target_cores;
-            volatile Operation operation;
+            KLightLock m_lock;
+            KLightLock m_cv_lock;
+            KLightConditionVariable m_cv;
+            std::atomic<u64> m_target_cores;
+            volatile Operation m_operation;
         private:
             static void ThreadFunction(uintptr_t _this) {
                 reinterpret_cast<KCacheHelperInterruptHandler *>(_this)->ThreadFunctionImpl();
@@ -108,9 +108,9 @@ namespace ams::kern::arch::arm64::cpu {
                 while (true) {
                     /* Wait for a request to come in. */
                     {
-                        KScopedLightLock lk(this->cv_lock);
-                        while ((this->target_cores & (1ul << core_id)) == 0) {
-                            this->cv.Wait(std::addressof(this->cv_lock));
+                        KScopedLightLock lk(m_cv_lock);
+                        while ((m_target_cores & (1ul << core_id)) == 0) {
+                            m_cv.Wait(std::addressof(m_cv_lock));
                         }
                     }

@@ -119,9 +119,9 @@ namespace ams::kern::arch::arm64::cpu {

                     /* Broadcast, if there's nothing pending. */
                     {
-                        KScopedLightLock lk(this->cv_lock);
-                        if (this->target_cores == 0) {
-                            this->cv.Broadcast();
+                        KScopedLightLock lk(m_cv_lock);
+                        if (m_target_cores == 0) {
+                            m_cv.Broadcast();
                         }
                     }
                 }
@@ -129,7 +129,7 @@ namespace ams::kern::arch::arm64::cpu {

             void ProcessOperation();
         public:
-            constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), lock(), cv_lock(), cv(), target_cores(), operation(Operation::Idle) { /* ... */ }
+            constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), m_lock(), m_cv_lock(), m_cv(), m_target_cores(), m_operation(Operation::Idle) { /* ... */ }

             void Initialize(s32 core_id) {
                 /* Reserve a thread from the system limit. */
@@ -154,7 +154,7 @@ namespace ams::kern::arch::arm64::cpu {
             }

             void RequestOperation(Operation op) {
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);

                 /* Create core masks for us to use. */
                 constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul;
@@ -162,48 +162,48 @@ namespace ams::kern::arch::arm64::cpu {

                 if ((op == Operation::InstructionMemoryBarrier) || (Kernel::GetState() == Kernel::State::Initializing)) {
                     /* Check that there's no on-going operation. */
-                    MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
-                    MESOSPHERE_ABORT_UNLESS(this->target_cores == 0);
+                    MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
+                    MESOSPHERE_ABORT_UNLESS(m_target_cores == 0);

                     /* Set operation. */
-                    this->operation = op;
+                    m_operation = op;

                     /* For certain operations, we want to send an interrupt. */
-                    this->target_cores = other_cores_mask;
+                    m_target_cores = other_cores_mask;

-                    const u64 target_mask = this->target_cores;
+                    const u64 target_mask = m_target_cores;
                     DataSynchronizationBarrier();
                     Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);

                     this->ProcessOperation();
-                    while (this->target_cores != 0) {
+                    while (m_target_cores != 0) {
                         cpu::Yield();
                     }

                     /* Go idle again. */
-                    this->operation = Operation::Idle;
+                    m_operation = Operation::Idle;
                 } else {
                     /* Lock condvar so that we can send and wait for acknowledgement of request. */
-                    KScopedLightLock cv_lk(this->cv_lock);
+                    KScopedLightLock cv_lk(m_cv_lock);

                     /* Check that there's no on-going operation. */
-                    MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
-                    MESOSPHERE_ABORT_UNLESS(this->target_cores == 0);
+                    MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
+                    MESOSPHERE_ABORT_UNLESS(m_target_cores == 0);

                     /* Set operation. */
-                    this->operation = op;
+                    m_operation = op;

                     /* Request all cores. */
-                    this->target_cores = AllCoresMask;
+                    m_target_cores = AllCoresMask;

                     /* Use the condvar. */
-                    this->cv.Broadcast();
-                    while (this->target_cores != 0) {
-                        this->cv.Wait(std::addressof(this->cv_lock));
+                    m_cv.Broadcast();
+                    while (m_target_cores != 0) {
+                        m_cv.Wait(std::addressof(m_cv_lock));
                     }

                     /* Go idle again. */
-                    this->operation = Operation::Idle;
+                    m_operation = Operation::Idle;
                 }
             }
     };
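RequestOperation above acknowledges work in two different ways: the IPI path publishes `m_target_cores`, interrupts the other cores, does its own share via `ProcessOperation`, and spin-waits for the mask to drain, while the thread path wakes the per-core helper threads through the condition variable and sleeps until they clear their bits. A reduced sketch of the IPI path's publish/acknowledge protocol (`SendIpiTo` and `Yield` are hypothetical primitives, and the core count of 4 is an assumption):

    #include <atomic>
    #include <cstdint>

    /* Hypothetical primitives standing in for the kernel's. */
    void SendIpiTo(uint64_t core_mask);
    void Yield();

    std::atomic<uint64_t> g_target_cores{0};

    /* Requester: publish the mask, interrupt the others, do our share, spin. */
    void RequestWithIpi(uint64_t other_cores_mask, void (*process)()) {
        g_target_cores.store(other_cores_mask, std::memory_order_release);
        SendIpiTo(other_cores_mask);
        process();                                        /* this core's share */
        while (g_target_cores.load(std::memory_order_acquire) != 0) { Yield(); }
    }

    /* Each targeted core acknowledges by atomically clearing its own bit. */
    void OnIpi(int core_id, void (*process)()) {
        process();
        g_target_cores.fetch_and(~(1ull << core_id), std::memory_order_release);
    }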
@@ -283,7 +283,7 @@ namespace ams::kern::arch::arm64::cpu {
     }

     void KCacheHelperInterruptHandler::ProcessOperation() {
-        switch (this->operation) {
+        switch (m_operation) {
             case Operation::Idle:
                 break;
             case Operation::InstructionMemoryBarrier:
@@ -299,7 +299,7 @@ namespace ams::kern::arch::arm64::cpu {
                 break;
         }

-        this->target_cores &= ~(1ul << GetCurrentCoreId());
+        m_target_cores &= ~(1ul << GetCurrentCoreId());
     }

     ALWAYS_INLINE void SetEventLocally() {

@@ -22,7 +22,7 @@ namespace ams::kern::arch::arm64 {
         InitializeGlobalTimer();

         /* Set maximum time. */
-        this->maximum_time = static_cast<s64>(std::min<u64>(std::numeric_limits<s64>::max(), cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor().GetCompareValue()));
+        m_maximum_time = static_cast<s64>(std::min<u64>(std::numeric_limits<s64>::max(), cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor().GetCompareValue()));

         /* Bind the interrupt task for this core. */
         Kernel::GetInterruptManager().BindHandler(this, KInterruptName_NonSecurePhysicalTimer, GetCurrentCoreId(), KInterruptController::PriorityLevel_Timer, true, true);
@@ -41,7 +41,7 @@ namespace ams::kern::arch::arm64 {

         /* Disable the timer interrupt while we handle this. */
         DisableInterrupt();
-        if (const s64 next_time = this->DoInterruptTaskImpl(GetTick()); 0 < next_time && next_time <= this->maximum_time) {
+        if (const s64 next_time = this->DoInterruptTaskImpl(GetTick()); 0 < next_time && next_time <= m_maximum_time) {
             /* We have a next time, so we should set the time to interrupt and turn the interrupt on. */
             SetCompareValue(next_time);
             EnableInterrupt();
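The timer code above snapshots the power-on compare value as `m_maximum_time`, clamping through `std::min<u64>` so the later cast to `s64` cannot overflow; the interrupt task then re-arms the timer only when the next requested tick fits under that bound. The clamp in isolation:

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    /* Clamp an unsigned 64-bit tick count so it is representable as s64. */
    int64_t ClampTicks(uint64_t raw_compare_value) {
        return static_cast<int64_t>(
            std::min<uint64_t>(std::numeric_limits<int64_t>::max(), raw_compare_value));
    }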
@@ -18,11 +18,11 @@
 namespace ams::kern::arch::arm64 {

     void KInterruptManager::Initialize(s32 core_id) {
-        this->interrupt_controller.Initialize(core_id);
+        m_interrupt_controller.Initialize(core_id);
     }

     void KInterruptManager::Finalize(s32 core_id) {
-        this->interrupt_controller.Finalize(core_id);
+        m_interrupt_controller.Finalize(core_id);
     }

     void KInterruptManager::Save(s32 core_id) {
@@ -34,18 +34,18 @@ namespace ams::kern::arch::arm64 {

         /* If on core 0, save the global interrupts. */
         if (core_id == 0) {
-            MESOSPHERE_ABORT_UNLESS(!this->global_state_saved);
-            this->interrupt_controller.SaveGlobal(std::addressof(this->global_state));
-            this->global_state_saved = true;
+            MESOSPHERE_ABORT_UNLESS(!m_global_state_saved);
+            m_interrupt_controller.SaveGlobal(std::addressof(m_global_state));
+            m_global_state_saved = true;
         }

         /* Ensure all cores get to this point before continuing. */
         cpu::SynchronizeAllCores();

         /* Save all local interrupts. */
-        MESOSPHERE_ABORT_UNLESS(!this->local_state_saved[core_id]);
-        this->interrupt_controller.SaveCoreLocal(std::addressof(this->local_states[core_id]));
-        this->local_state_saved[core_id] = true;
+        MESOSPHERE_ABORT_UNLESS(!m_local_state_saved[core_id]);
+        m_interrupt_controller.SaveCoreLocal(std::addressof(m_local_states[core_id]));
+        m_local_state_saved[core_id] = true;

         /* Ensure all cores get to this point before continuing. */
         cpu::SynchronizeAllCores();
@@ -88,18 +88,18 @@ namespace ams::kern::arch::arm64 {
         cpu::SynchronizeAllCores();

         /* Restore all local interrupts. */
-        MESOSPHERE_ASSERT(this->local_state_saved[core_id]);
-        this->interrupt_controller.RestoreCoreLocal(std::addressof(this->local_states[core_id]));
-        this->local_state_saved[core_id] = false;
+        MESOSPHERE_ASSERT(m_local_state_saved[core_id]);
+        m_interrupt_controller.RestoreCoreLocal(std::addressof(m_local_states[core_id]));
+        m_local_state_saved[core_id] = false;

         /* Ensure all cores get to this point before continuing. */
         cpu::SynchronizeAllCores();

         /* If on core 0, restore the global interrupts. */
         if (core_id == 0) {
-            MESOSPHERE_ASSERT(this->global_state_saved);
-            this->interrupt_controller.RestoreGlobal(std::addressof(this->global_state));
-            this->global_state_saved = false;
+            MESOSPHERE_ASSERT(m_global_state_saved);
+            m_interrupt_controller.RestoreGlobal(std::addressof(m_global_state));
+            m_global_state_saved = false;
         }

         /* Ensure all cores get to this point before continuing. */
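Save and Restore interleave `cpu::SynchronizeAllCores()` so that core 0's snapshot of the shared distributor state and each core's snapshot of its banked state happen in strictly separated phases; a core that raced ahead could otherwise save or restore against a half-written distributor. The phase structure, reduced to a sketch around an assumed all-cores barrier:

    /* Hypothetical all-cores barrier, standing in for cpu::SynchronizeAllCores(). */
    void BarrierAllCores();

    void SaveInterruptState(int core_id) {
        if (core_id == 0) {
            /* Phase 1: exactly one core snapshots the shared (global) registers. */
            /* SaveGlobal(...); */
        }
        BarrierAllCores();   /* everyone waits for the global snapshot */

        /* Phase 2: every core snapshots its own banked (local) registers. */
        /* SaveCoreLocal(core_id); */
        BarrierAllCores();   /* no core continues until all locals are saved */
    }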
@@ -108,7 +108,7 @@ namespace ams::kern::arch::arm64 {

     bool KInterruptManager::OnHandleInterrupt() {
         /* Get the interrupt id. */
-        const u32 raw_irq = this->interrupt_controller.GetIrq();
+        const u32 raw_irq = m_interrupt_controller.GetIrq();
         const s32 irq = KInterruptController::ConvertRawIrq(raw_irq);

         /* Trace the interrupt. */
@@ -126,7 +126,7 @@ namespace ams::kern::arch::arm64 {
             if (entry.handler != nullptr) {
                 /* Set manual clear needed if relevant. */
                 if (entry.manually_cleared) {
-                    this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
+                    m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
                     entry.needs_clear = true;
                 }

@@ -143,7 +143,7 @@ namespace ams::kern::arch::arm64 {
             if (entry.handler != nullptr) {
                 /* Set manual clear needed if relevant. */
                 if (entry.manually_cleared) {
-                    this->interrupt_controller.Disable(irq);
+                    m_interrupt_controller.Disable(irq);
                     entry.needs_clear = true;
                 }

@@ -157,7 +157,7 @@ namespace ams::kern::arch::arm64 {
         }

         /* Acknowledge the interrupt. */
-        this->interrupt_controller.EndOfInterrupt(raw_irq);
+        m_interrupt_controller.EndOfInterrupt(raw_irq);

         /* If we found no task, then we don't need to reschedule. */
         if (task == nullptr) {
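For handlers registered as `manually_cleared`, OnHandleInterrupt keeps the interrupt from immediately re-firing until it is explicitly cleared: a global interrupt is demoted to `PriorityLevel_Low` (it stays pending but can no longer preempt), while a local one is disabled outright; the `needs_clear` flag tells the later clear path which action to undo. The two strategies side by side, as a sketch over a hypothetical controller type:

    /* Hypothetical controller mirroring the two masking strategies above. */
    struct Gic {
        void SetPriorityLevel(int irq, int level) { /* write GICD_IPRIORITYR */ }
        void Disable(int irq)                     { /* write GICD_ICENABLER  */ }
    };

    constexpr int PriorityLevel_Low = 3;   /* assumed value */

    void MaskUntilCleared(Gic &gic, int irq, bool is_global, bool &needs_clear) {
        if (is_global) {
            /* Demote: the interrupt stays pending but can no longer preempt. */
            gic.SetPriorityLevel(irq, PriorityLevel_Low);
        } else {
            /* Local interrupt: simply turn it off until the clear request. */
            gic.Disable(irq);
        }
        needs_clear = true;   /* the clear path re-enables / re-prioritizes */
    }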
@@ -273,16 +273,16 @@ namespace ams::kern::arch::arm64 {

         /* Configure the interrupt as level or edge. */
         if (level) {
-            this->interrupt_controller.SetLevel(irq);
+            m_interrupt_controller.SetLevel(irq);
         } else {
-            this->interrupt_controller.SetEdge(irq);
+            m_interrupt_controller.SetEdge(irq);
         }

         /* Configure the interrupt. */
-        this->interrupt_controller.Clear(irq);
-        this->interrupt_controller.SetTarget(irq, core_id);
-        this->interrupt_controller.SetPriorityLevel(irq, priority);
-        this->interrupt_controller.Enable(irq);
+        m_interrupt_controller.Clear(irq);
+        m_interrupt_controller.SetTarget(irq, core_id);
+        m_interrupt_controller.SetPriorityLevel(irq, priority);
+        m_interrupt_controller.Enable(irq);

         return ResultSuccess();
     }
@@ -303,19 +303,19 @@ namespace ams::kern::arch::arm64 {
         entry.priority = static_cast<u8>(priority);

         /* Configure the interrupt. */
-        this->interrupt_controller.Clear(irq);
-        this->interrupt_controller.SetPriorityLevel(irq, priority);
-        this->interrupt_controller.Enable(irq);
+        m_interrupt_controller.Clear(irq);
+        m_interrupt_controller.SetPriorityLevel(irq, priority);
+        m_interrupt_controller.Enable(irq);

         return ResultSuccess();
     }

     Result KInterruptManager::UnbindGlobal(s32 irq) {
         for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
-            this->interrupt_controller.ClearTarget(irq, static_cast<s32>(core_id));
+            m_interrupt_controller.ClearTarget(irq, static_cast<s32>(core_id));
         }
-        this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
-        this->interrupt_controller.Disable(irq);
+        m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
+        m_interrupt_controller.Disable(irq);

         GetGlobalInterruptEntry(irq).handler = nullptr;

@@ -326,8 +326,8 @@ namespace ams::kern::arch::arm64 {
         auto &entry = this->GetLocalInterruptEntry(irq);
         R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());

-        this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
-        this->interrupt_controller.Disable(irq);
+        m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
+        m_interrupt_controller.Disable(irq);

         entry.handler = nullptr;

@@ -345,7 +345,7 @@ namespace ams::kern::arch::arm64 {

         /* Clear and enable. */
         entry.needs_clear = false;
-        this->interrupt_controller.Enable(irq);
+        m_interrupt_controller.Enable(irq);
         return ResultSuccess();
     }

@@ -360,7 +360,7 @@ namespace ams::kern::arch::arm64 {

         /* Clear and set priority. */
         entry.needs_clear = false;
-        this->interrupt_controller.SetPriorityLevel(irq, entry.priority);
+        m_interrupt_controller.SetPriorityLevel(irq, entry.priority);
         return ResultSuccess();
     }

@@ -21,13 +21,13 @@ namespace ams::kern::arch::arm64 {

     class AlignedMemoryBlock {
         private:
-            uintptr_t before_start;
-            uintptr_t before_end;
-            uintptr_t after_start;
-            uintptr_t after_end;
-            size_t current_alignment;
+            uintptr_t m_before_start;
+            uintptr_t m_before_end;
+            uintptr_t m_after_start;
+            uintptr_t m_after_end;
+            size_t m_current_alignment;
         public:
-            constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : before_start(0), before_end(0), after_start(0), after_end(0), current_alignment(0) {
+            constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : m_before_start(0), m_before_end(0), m_after_start(0), m_after_end(0), m_current_alignment(0) {
                 MESOSPHERE_ASSERT(util::IsAligned(start, PageSize));
                 MESOSPHERE_ASSERT(num_pages > 0);

@@ -38,41 +38,41 @@ namespace ams::kern::arch::arm64 {
                     alignment = KPageTable::GetSmallerAlignment(alignment * PageSize) / PageSize;
                 }

-                this->before_start = start_page;
-                this->before_end = util::AlignUp(start_page, alignment);
-                this->after_start = this->before_end;
-                this->after_end = start_page + num_pages;
-                this->current_alignment = alignment;
-                MESOSPHERE_ASSERT(this->current_alignment > 0);
+                m_before_start = start_page;
+                m_before_end = util::AlignUp(start_page, alignment);
+                m_after_start = m_before_end;
+                m_after_end = start_page + num_pages;
+                m_current_alignment = alignment;
+                MESOSPHERE_ASSERT(m_current_alignment > 0);
             }

             constexpr void SetAlignment(size_t alignment) {
                 /* We can only ever decrease the granularity. */
-                MESOSPHERE_ASSERT(this->current_alignment >= alignment / PageSize);
-                this->current_alignment = alignment / PageSize;
+                MESOSPHERE_ASSERT(m_current_alignment >= alignment / PageSize);
+                m_current_alignment = alignment / PageSize;
             }

             constexpr size_t GetAlignment() const {
-                return this->current_alignment * PageSize;
+                return m_current_alignment * PageSize;
             }

             constexpr void FindBlock(uintptr_t &out, size_t &num_pages) {
-                if ((this->after_end - this->after_start) >= this->current_alignment) {
+                if ((m_after_end - m_after_start) >= m_current_alignment) {
                     /* Select aligned memory from after block. */
-                    const size_t available_pages = util::AlignDown(this->after_end, this->current_alignment) - this->after_start;
+                    const size_t available_pages = util::AlignDown(m_after_end, m_current_alignment) - m_after_start;
                     if (num_pages == 0 || available_pages < num_pages) {
                         num_pages = available_pages;
                     }
-                    out = this->after_start * PageSize;
-                    this->after_start += num_pages;
-                } else if ((this->before_end - this->before_start) >= this->current_alignment) {
+                    out = m_after_start * PageSize;
+                    m_after_start += num_pages;
+                } else if ((m_before_end - m_before_start) >= m_current_alignment) {
                     /* Select aligned memory from before block. */
-                    const size_t available_pages = this->before_end - util::AlignUp(this->before_start, this->current_alignment);
+                    const size_t available_pages = m_before_end - util::AlignUp(m_before_start, m_current_alignment);
                     if (num_pages == 0 || available_pages < num_pages) {
                         num_pages = available_pages;
                     }
-                    this->before_end -= num_pages;
-                    out = this->before_end * PageSize;
+                    m_before_end -= num_pages;
+                    out = m_before_end * PageSize;
                 } else {
                     /* Neither after or before can get an aligned bit of memory. */
                     out = 0;
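FindBlock hands out the largest aligned run it can, preferring the aligned "after" region and falling back to the "before" remainder, and shrinks `num_pages` in place to what is actually available; passing `num_pages == 0` requests everything available at the current alignment. A hypothetical call sequence against the class above (PageSize of 4KiB assumed, matching the kernel's constant):

    #include <cstddef>
    #include <cstdint>

    void Example() {
        constexpr size_t PageSize = 4 * 1024;

        /* Carve 2MiB-aligned runs out of [0x80000000, +64MiB). */
        AlignedMemoryBlock block(0x80000000, (64 * 1024 * 1024) / PageSize,
                                 (2 * 1024 * 1024) / PageSize);

        uintptr_t addr = 0;
        size_t num_pages = 0;    /* 0 == "as much as fits at this alignment" */
        block.FindBlock(addr, num_pages);
        if (addr != 0) {
            /* [addr, addr + num_pages * PageSize) is 2MiB-aligned and unused. */
        }
    }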
@@ -95,32 +95,32 @@ namespace ams::kern::arch::arm64 {
             static constexpr size_t NumWords = AsidCount / BitsPerWord;
             static constexpr WordType FullWord = ~WordType(0u);
         private:
-            WordType state[NumWords];
-            KLightLock lock;
-            u8 hint;
+            WordType m_state[NumWords];
+            KLightLock m_lock;
+            u8 m_hint;
         private:
             constexpr bool TestImpl(u8 asid) const {
-                return this->state[asid / BitsPerWord] & (1u << (asid % BitsPerWord));
+                return m_state[asid / BitsPerWord] & (1u << (asid % BitsPerWord));
             }
             constexpr void ReserveImpl(u8 asid) {
                 MESOSPHERE_ASSERT(!this->TestImpl(asid));
-                this->state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
+                m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
             }

             constexpr void ReleaseImpl(u8 asid) {
                 MESOSPHERE_ASSERT(this->TestImpl(asid));
-                this->state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
+                m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
             }

             constexpr u8 FindAvailable() const {
-                for (size_t i = 0; i < util::size(this->state); i++) {
-                    if (this->state[i] == FullWord) {
+                for (size_t i = 0; i < util::size(m_state); i++) {
+                    if (m_state[i] == FullWord) {
                         continue;
                     }
-                    const WordType clear_bit = (this->state[i] + 1) ^ (this->state[i]);
+                    const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]);
                     return BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit);
                 }
-                if (this->state[util::size(this->state)-1] == FullWord) {
+                if (m_state[util::size(m_state)-1] == FullWord) {
                     MESOSPHERE_PANIC("Unable to reserve ASID");
                 }
                 __builtin_unreachable();
@@ -130,26 +130,26 @@ namespace ams::kern::arch::arm64 {
                 return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType));
             }
         public:
-            constexpr KPageTableAsidManager() : state(), lock(), hint() {
+            constexpr KPageTableAsidManager() : m_state(), m_lock(), m_hint() {
                 for (size_t i = 0; i < NumReservedAsids; i++) {
                     this->ReserveImpl(ReservedAsids[i]);
                 }
             }

             u8 Reserve() {
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);

-                if (this->TestImpl(this->hint)) {
-                    this->hint = this->FindAvailable();
+                if (this->TestImpl(m_hint)) {
+                    m_hint = this->FindAvailable();
                 }

-                this->ReserveImpl(this->hint);
+                this->ReserveImpl(m_hint);

-                return this->hint++;
+                return m_hint++;
             }

             void Release(u8 asid) {
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);
                 this->ReleaseImpl(asid);
             }
     };
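FindAvailable leans on a classic bit trick: `word + 1` flips the lowest clear bit to one and all the set bits below it to zero, so `(word + 1) ^ word` yields a mask of exactly that clear bit plus the trailing ones, and the highest set bit of the mask is the index being searched for. A self-contained worked version:

    #include <cstdint>

    /* Index of the lowest clear bit, via the (w + 1) ^ w trick used above. */
    constexpr unsigned LowestClearBit(uint8_t w) {
        const uint8_t mask = static_cast<uint8_t>((w + 1) ^ w);  /* 0b0110111 -> 0b0001111 */
        /* The highest set bit of `mask` is the lowest clear bit of `w`. */
        unsigned idx = 0;
        for (uint8_t m = mask; (m >>= 1) != 0; ) { ++idx; }
        return idx;
    }

    static_assert(LowestClearBit(0b0110111) == 3);   /* bits 0..2 set, bit 3 clear */
    static_assert(LowestClearBit(0b0000000) == 0);
    static_assert(LowestClearBit(0b0000001) == 1);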
@@ -165,15 +165,15 @@ namespace ams::kern::arch::arm64 {

     Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) {
         /* Initialize basic fields. */
-        this->asid = 0;
-        this->manager = std::addressof(Kernel::GetPageTableManager());
+        m_asid = 0;
+        m_manager = std::addressof(Kernel::GetPageTableManager());

         /* Allocate a page for ttbr. */
-        const u64 asid_tag = (static_cast<u64>(this->asid) << 48ul);
-        const KVirtualAddress page = this->manager->Allocate();
+        const u64 asid_tag = (static_cast<u64>(m_asid) << 48ul);
+        const KVirtualAddress page = m_manager->Allocate();
         MESOSPHERE_ASSERT(page != Null<KVirtualAddress>);
         cpu::ClearPageToZero(GetVoidPointer(page));
-        this->ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag;
+        m_ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag;

         /* Initialize the base page table. */
         MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));
@@ -186,17 +186,17 @@ namespace ams::kern::arch::arm64 {
         MESOSPHERE_UNUSED(id);

         /* Get an ASID */
-        this->asid = g_asid_manager.Reserve();
-        auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(this->asid); };
+        m_asid = g_asid_manager.Reserve();
+        auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(m_asid); };

         /* Set our manager. */
-        this->manager = pt_manager;
+        m_manager = pt_manager;

         /* Allocate a new table, and set our ttbr value. */
-        const KVirtualAddress new_table = this->manager->Allocate();
+        const KVirtualAddress new_table = m_manager->Allocate();
         R_UNLESS(new_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
-        this->ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), asid);
-        auto table_guard = SCOPE_GUARD { this->manager->Free(new_table); };
+        m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid);
+        auto table_guard = SCOPE_GUARD { m_manager->Free(new_table); };

         /* Initialize our base table. */
         const size_t as_width = GetAddressSpaceWidth(as_type);
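Both initializers above pack the ASID into bits 63:48 of the TTBR value next to the table's physical base, matching the arm64 TTBRn_EL1 layout; that is all `asid_tag` and `EncodeTtbr` do. As a sketch:

    #include <cstdint>

    /* TTBRn_EL1 layout (simplified): [63:48] ASID, [47:1] table base address. */
    constexpr uint64_t EncodeTtbr(uint64_t table_phys_addr, uint8_t asid) {
        return table_phys_addr | (static_cast<uint64_t>(asid) << 48);
    }

    static_assert(EncodeTtbr(0x80000000, 1) == 0x0001000080000000);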
@@ -308,7 +308,7 @@ namespace ams::kern::arch::arm64 {
         }

         /* Release our asid. */
-        g_asid_manager.Release(this->asid);
+        g_asid_manager.Release(m_asid);

         return ResultSuccess();
     }
@@ -18,19 +18,19 @@
 namespace ams::kern::arch::arm64 {

     void KPageTableImpl::InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end) {
-        this->table = static_cast<L1PageTableEntry *>(tb);
-        this->is_kernel = true;
-        this->num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
+        m_table = static_cast<L1PageTableEntry *>(tb);
+        m_is_kernel = true;
+        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
     }

     void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
-        this->table = static_cast<L1PageTableEntry *>(tb);
-        this->is_kernel = false;
-        this->num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
+        m_table = static_cast<L1PageTableEntry *>(tb);
+        m_is_kernel = false;
+        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
     }

     L1PageTableEntry *KPageTableImpl::Finalize() {
-        return this->table;
+        return m_table;
     }

     bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
@@ -119,21 +119,21 @@ namespace ams::kern::arch::arm64 {
             out_entry->phys_addr = Null<KPhysicalAddress>;
             out_entry->block_size = L1BlockSize;
             out_entry->sw_reserved_bits = 0;
-            out_context->l1_entry = this->table + this->num_entries;
+            out_context->l1_entry = m_table + m_num_entries;
             out_context->l2_entry = nullptr;
             out_context->l3_entry = nullptr;

         /* Validate that we can read the actual entry. */
         const size_t l0_index = GetL0Index(address);
         const size_t l1_index = GetL1Index(address);
-        if (this->is_kernel) {
+        if (m_is_kernel) {
             /* Kernel entries must be accessed via TTBR1. */
-            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) {
+            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
                 return false;
             }
         } else {
             /* User entries must be accessed with TTBR0. */
-            if ((l0_index != 0) || l1_index >= this->num_entries) {
+            if ((l0_index != 0) || l1_index >= m_num_entries) {
                 return false;
             }
         }
@@ -212,15 +212,15 @@ namespace ams::kern::arch::arm64 {
             }
         } else {
             /* We need to update the l1 entry. */
-            const size_t l1_index = context->l1_entry - this->table;
-            if (l1_index < this->num_entries) {
+            const size_t l1_index = context->l1_entry - m_table;
+            if (l1_index < m_num_entries) {
                 valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null<KProcessAddress>);
             } else {
                 /* Invalid, end traversal. */
                 out_entry->phys_addr = Null<KPhysicalAddress>;
                 out_entry->block_size = L1BlockSize;
                 out_entry->sw_reserved_bits = 0;
-                context->l1_entry = this->table + this->num_entries;
+                context->l1_entry = m_table + m_num_entries;
                 context->l2_entry = nullptr;
                 context->l3_entry = nullptr;
                 return false;
@@ -262,14 +262,14 @@ namespace ams::kern::arch::arm64 {
         /* Validate that we can read the actual entry. */
         const size_t l0_index = GetL0Index(address);
         const size_t l1_index = GetL1Index(address);
-        if (this->is_kernel) {
+        if (m_is_kernel) {
             /* Kernel entries must be accessed via TTBR1. */
-            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) {
+            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
                 return false;
             }
         } else {
             /* User entries must be accessed with TTBR0. */
-            if ((l0_index != 0) || l1_index >= this->num_entries) {
+            if ((l0_index != 0) || l1_index >= m_num_entries) {
                 return false;
             }
         }
@@ -322,14 +322,14 @@ namespace ams::kern::arch::arm64 {
         /* Validate that we can read the actual entry. */
         const size_t l0_index = GetL0Index(cur);
         const size_t l1_index = GetL1Index(cur);
-        if (this->is_kernel) {
+        if (m_is_kernel) {
             /* Kernel entries must be accessed via TTBR1. */
-            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) {
+            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
                 return;
             }
         } else {
             /* User entries must be accessed with TTBR0. */
-            if ((l0_index != 0) || l1_index >= this->num_entries) {
+            if ((l0_index != 0) || l1_index >= m_num_entries) {
                 return;
             }
         }
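The validation block that repeats through these traversal functions encodes the arm64 split address space: a kernel table owns the last `m_num_entries` L1 slots, reached through TTBR1 at the top of the address space, while a user table owns the first `m_num_entries` slots through TTBR0. The predicate in isolation, assuming 512-entry tables (4KiB granule):

    #include <cstddef>

    constexpr size_t MaxPageTableEntries = 512;   /* 4KiB granule: 512 entries per level */

    constexpr bool IsIndexValid(bool is_kernel, size_t num_entries, size_t l0_index, size_t l1_index) {
        if (is_kernel) {
            /* Kernel mappings live in the last num_entries L1 slots (via TTBR1). */
            return (l0_index == MaxPageTableEntries - 1) && (l1_index >= MaxPageTableEntries - num_entries);
        } else {
            /* User mappings live in the first num_entries L1 slots (via TTBR0). */
            return (l0_index == 0) && (l1_index < num_entries);
        }
    }

    static_assert(IsIndexValid(true, 1, 511, 511));
    static_assert(!IsIndexValid(true, 1, 511, 510));
    static_assert(IsIndexValid(false, 4, 0, 3));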
@@ -482,8 +482,8 @@ namespace ams::kern::arch::arm64 {
         #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
         {
             ++num_tables;
-            for (size_t l1_index = 0; l1_index < this->num_entries; ++l1_index) {
-                auto &l1_entry = this->table[l1_index];
+            for (size_t l1_index = 0; l1_index < m_num_entries; ++l1_index) {
+                auto &l1_entry = m_table[l1_index];
                 if (l1_entry.IsTable()) {
                     ++num_tables;
                     for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
@@ -19,7 +19,7 @@ namespace ams::kern::arch::arm64 {

     void KSupervisorPageTable::Initialize(s32 core_id) {
         /* Get the identity mapping ttbr0. */
-        this->ttbr0_identity[core_id] = cpu::GetTtbr0El1();
+        m_ttbr0_identity[core_id] = cpu::GetTtbr0El1();

         /* Set sctlr_el1 */
         cpu::SystemControlRegisterAccessor().SetWxn(true).Store();
@@ -35,7 +35,7 @@ namespace ams::kern::arch::arm64 {
         const u64 kernel_vaddr_start = 0xFFFFFF8000000000ul;
         const u64 kernel_vaddr_end = 0xFFFFFFFFFFE00000ul;
         void *table = GetVoidPointer(KPageTableBase::GetLinearMappedVirtualAddress(ttbr1));
-        this->page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end);
+        m_page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end);
         }
     }

@@ -117,38 +117,38 @@ namespace ams::kern::arch::arm64 {
         /* Determine LR and SP. */
         if (is_user) {
             /* Usermode thread. */
-            this->lr = reinterpret_cast<uintptr_t>(::ams::kern::arch::arm64::UserModeThreadStarter);
-            this->sp = SetupStackForUserModeThreadStarter(u_pc, k_sp, u_sp, arg, is_64_bit);
+            m_lr = reinterpret_cast<uintptr_t>(::ams::kern::arch::arm64::UserModeThreadStarter);
+            m_sp = SetupStackForUserModeThreadStarter(u_pc, k_sp, u_sp, arg, is_64_bit);
         } else {
             /* Kernel thread. */
             MESOSPHERE_ASSERT(is_64_bit);

             if (is_main) {
                 /* Main thread. */
-                this->lr = GetInteger(u_pc);
-                this->sp = GetInteger(k_sp);
+                m_lr = GetInteger(u_pc);
+                m_sp = GetInteger(k_sp);
             } else {
                 /* Generic Kernel thread. */
-                this->lr = reinterpret_cast<uintptr_t>(::ams::kern::arch::arm64::SupervisorModeThreadStarter);
-                this->sp = SetupStackForSupervisorModeThreadStarter(u_pc, k_sp, arg);
+                m_lr = reinterpret_cast<uintptr_t>(::ams::kern::arch::arm64::SupervisorModeThreadStarter);
+                m_sp = SetupStackForSupervisorModeThreadStarter(u_pc, k_sp, arg);
             }
         }

         /* Clear callee-saved registers. */
-        for (size_t i = 0; i < util::size(this->callee_saved.registers); i++) {
-            this->callee_saved.registers[i] = 0;
+        for (size_t i = 0; i < util::size(m_callee_saved.registers); i++) {
+            m_callee_saved.registers[i] = 0;
         }

         /* Clear FPU state. */
-        this->fpcr = 0;
-        this->fpsr = 0;
-        this->cpacr = 0;
-        for (size_t i = 0; i < util::size(this->fpu_registers); i++) {
-            this->fpu_registers[i] = 0;
+        m_fpcr = 0;
+        m_fpsr = 0;
+        m_cpacr = 0;
+        for (size_t i = 0; i < util::size(m_fpu_registers); i++) {
+            m_fpu_registers[i] = 0;
         }

         /* Lock the context, if we're a main thread. */
-        this->locked = is_main;
+        m_locked = is_main;

         return ResultSuccess();
     }
@@ -159,7 +159,7 @@ namespace ams::kern::arch::arm64 {
     }

     void KThreadContext::SetArguments(uintptr_t arg0, uintptr_t arg1) {
-        u64 *stack = reinterpret_cast<u64 *>(this->sp);
+        u64 *stack = reinterpret_cast<u64 *>(m_sp);
         stack[0] = arg0;
         stack[1] = arg1;
     }
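SetArguments can simply poke `stack[0]` and `stack[1]` because the earlier stack-setup routines leave the two argument slots at the very top of the saved kernel stack, where the thread starter will pop them into x0/x1. A hypothetical sketch of the matching setup side (the real frame layout lives in the `SetupStackFor…ThreadStarter` helpers, which this diff does not show):

    #include <cstdint>

    /* Hypothetical: reserve two u64 slots at the top of the kernel stack so a
     * later SetArguments-style patch can fill in x0/x1 for the thread starter. */
    uintptr_t SetupStarterFrame(uintptr_t stack_top, uint64_t arg0, uint64_t arg1) {
        uint64_t *sp = reinterpret_cast<uint64_t *>(stack_top) - 2;
        sp[0] = arg0;   /* popped into x0 by the starter */
        sp[1] = arg1;   /* popped into x1 by the starter */
        return reinterpret_cast<uintptr_t>(sp);   /* becomes the thread's saved SP */
    }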
@@ -199,11 +199,11 @@ namespace ams::kern::arch::arm64 {
     void KThreadContext::SetFpuRegisters(const u128 *v, bool is_64_bit) {
         if (is_64_bit) {
             for (size_t i = 0; i < KThreadContext::NumFpuRegisters; ++i) {
-                this->fpu_registers[i] = v[i];
+                m_fpu_registers[i] = v[i];
             }
         } else {
             for (size_t i = 0; i < KThreadContext::NumFpuRegisters / 2; ++i) {
-                this->fpu_registers[i] = v[i];
+                m_fpu_registers[i] = v[i];
             }
         }
     }

@@ -210,10 +210,10 @@ namespace ams::kern::board::nintendo::nx {
                 Bit_Readable = 31,
             };
         private:
-            u32 value;
+            u32 m_value;
         protected:
             constexpr ALWAYS_INLINE u32 SelectBit(Bit n) const {
-                return (this->value & (1u << n));
+                return (m_value & (1u << n));
             }

             constexpr ALWAYS_INLINE bool GetBit(Bit n) const {
@@ -231,7 +231,7 @@ namespace ams::kern::board::nintendo::nx {
             ALWAYS_INLINE void SetValue(u32 v) {
                 /* Prevent re-ordering around entry modifications. */
                 __asm__ __volatile__("" ::: "memory");
-                this->value = v;
+                m_value = v;
                 __asm__ __volatile__("" ::: "memory");
             }
         public:
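The empty `__asm__ __volatile__` statements with a "memory" clobber around `m_value = v` are compiler-only barriers, not hardware ones: they forbid the compiler from reordering or coalescing neighboring entry stores, while hardware visibility is handled separately by the PTC/TLB invalidation and `SmmuSynchronizationBarrier` calls further down. The portable equivalent:

    #include <atomic>
    #include <cstdint>

    /* Compiler-only fences around a page-table-entry store; the hardware side
     * (cache maintenance / SMMU sync) still has to be done separately. */
    void StoreEntry(uint32_t *entry, uint32_t value) {
        std::atomic_signal_fence(std::memory_order_seq_cst);  /* no compiler reordering in */
        *entry = value;
        std::atomic_signal_fence(std::memory_order_seq_cst);  /* no compiler reordering out */
    }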
@@ -246,7 +246,7 @@ namespace ams::kern::board::nintendo::nx {

             constexpr ALWAYS_INLINE u32 GetAttributes() const { return this->SelectBit(Bit_NonSecure) | this->SelectBit(Bit_Writeable) | this->SelectBit(Bit_Readable); }

-            constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { return (static_cast<u64>(this->value) << DevicePageBits) & PhysicalAddressMask; }
+            constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { return (static_cast<u64>(m_value) << DevicePageBits) & PhysicalAddressMask; }

             ALWAYS_INLINE void Invalidate() { this->SetValue(0); }
     };
@@ -286,36 +286,36 @@ namespace ams::kern::board::nintendo::nx {
             static constexpr size_t NumWords = AsidCount / BitsPerWord;
             static constexpr WordType FullWord = ~WordType(0u);
         private:
-            WordType state[NumWords];
-            KLightLock lock;
+            WordType m_state[NumWords];
+            KLightLock m_lock;
         private:
             constexpr void ReserveImpl(u8 asid) {
-                this->state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
+                m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
             }

             constexpr void ReleaseImpl(u8 asid) {
-                this->state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
+                m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
             }

             static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) {
                 return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType));
             }
         public:
-            constexpr KDeviceAsidManager() : state(), lock() {
+            constexpr KDeviceAsidManager() : m_state(), m_lock() {
                 for (size_t i = 0; i < NumReservedAsids; i++) {
                     this->ReserveImpl(ReservedAsids[i]);
                 }
             }

             Result Reserve(u8 *out, size_t num_desired) {
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);
                 MESOSPHERE_ASSERT(num_desired > 0);

                 size_t num_reserved = 0;
                 for (size_t i = 0; i < NumWords; i++) {
-                    while (this->state[i] != FullWord) {
-                        const WordType clear_bit = (this->state[i] + 1) ^ (this->state[i]);
-                        this->state[i] |= clear_bit;
+                    while (m_state[i] != FullWord) {
+                        const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]);
+                        m_state[i] |= clear_bit;
                         out[num_reserved++] = static_cast<u8>(BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit));
                         R_SUCCEED_IF(num_reserved == num_desired);
                     }
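Reserve above takes `num_desired` ASIDs in a single pass by repeatedly setting the lowest clear bit of each occupancy word, using the same `(w + 1) ^ w` mask as the page-table ASID manager earlier in this commit, and succeeding early once enough are taken. One word's worth of that loop, as a sketch:

    #include <cstddef>
    #include <cstdint>

    /* Take up to `want` free slots (clear bits) from a 64-bit occupancy word,
     * writing their indices to `out`; returns how many were taken. */
    size_t TakeFromWord(uint64_t &word, uint8_t *out, size_t want) {
        size_t taken = 0;
        while (taken < want && word != ~0ull) {
            const uint64_t mask = (word + 1) ^ word;   /* lowest clear bit + trailing ones */
            word |= mask & ~(mask >> 1);               /* set just that bit */
            out[taken++] = static_cast<uint8_t>(63 - __builtin_clzll(mask));
        }
        return taken;
    }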
@@ -329,7 +329,7 @@ namespace ams::kern::board::nintendo::nx {
             }

             void Release(u8 asid) {
-                KScopedLightLock lk(this->lock);
+                KScopedLightLock lk(m_lock);
                 this->ReleaseImpl(asid);
             }
     };
@@ -776,14 +776,14 @@ namespace ams::kern::board::nintendo::nx {
         /* Clear the tables. */
         static_assert(TableCount == (1ul << DeviceVirtualAddressBits) / DeviceRegionSize);
         for (size_t i = 0; i < TableCount; ++i) {
-            this->tables[i] = Null<KVirtualAddress>;
+            m_tables[i] = Null<KVirtualAddress>;
         }

         /* Ensure that we clean up the tables on failure. */
         auto table_guard = SCOPE_GUARD {
             for (size_t i = start_index; i <= end_index; ++i) {
-                if (this->tables[i] != Null<KVirtualAddress> && ptm.Close(this->tables[i], 1)) {
-                    ptm.Free(this->tables[i]);
+                if (m_tables[i] != Null<KVirtualAddress> && ptm.Close(m_tables[i], 1)) {
+                    ptm.Free(m_tables[i]);
                 }
             }
         };
@@ -797,32 +797,32 @@ namespace ams::kern::board::nintendo::nx {

             ptm.Open(table_vaddr, 1);
             cpu::StoreDataCache(GetVoidPointer(table_vaddr), PageDirectorySize);
-            this->tables[i] = table_vaddr;
+            m_tables[i] = table_vaddr;
         }

         /* Clear asids. */
         for (size_t i = 0; i < TableCount; ++i) {
-            this->table_asids[i] = g_reserved_asid;
+            m_table_asids[i] = g_reserved_asid;
         }

         /* Reserve asids for the tables. */
-        R_TRY(g_asid_manager.Reserve(std::addressof(this->table_asids[start_index]), end_index - start_index + 1));
+        R_TRY(g_asid_manager.Reserve(std::addressof(m_table_asids[start_index]), end_index - start_index + 1));

         /* Associate tables with asids. */
         for (size_t i = start_index; i <= end_index; ++i) {
-            SetTable(this->table_asids[i], GetPageTablePhysicalAddress(this->tables[i]));
+            SetTable(m_table_asids[i], GetPageTablePhysicalAddress(m_tables[i]));
         }

         /* Set member variables. */
-        this->attached_device = 0;
-        this->attached_value = (1u << 31) | this->table_asids[0];
-        this->detached_value = (1u << 31) | g_reserved_asid;
+        m_attached_device = 0;
+        m_attached_value = (1u << 31) | m_table_asids[0];
+        m_detached_value = (1u << 31) | g_reserved_asid;

-        this->hs_attached_value = (1u << 31);
-        this->hs_detached_value = (1u << 31);
+        m_hs_attached_value = (1u << 31);
+        m_hs_detached_value = (1u << 31);
         for (size_t i = 0; i < TableCount; ++i) {
-            this->hs_attached_value |= (this->table_asids[i] << (i * BITSIZEOF(u8)));
-            this->hs_detached_value |= (g_reserved_asid << (i * BITSIZEOF(u8)));
+            m_hs_attached_value |= (m_table_asids[i] << (i * BITSIZEOF(u8)));
+            m_hs_detached_value |= (g_reserved_asid << (i * BITSIZEOF(u8)));
         }

         /* We succeeded. */
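The attach/detach values built above all carry bit 31 (the enable marker) plus ASIDs: the plain value holds a single ASID in its low byte, while the HS variant packs one ASID per region table into successive bytes. The packing as a sketch (`TableCount` of 4 is an assumption here):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t TableCount = 4;   /* assumed: one table per device region */

    /* Non-HS devices take a single ASID; bit 31 marks the value as enabled. */
    constexpr uint32_t MakeAsidValue(uint8_t asid) {
        return (1u << 31) | asid;
    }

    /* HS devices take one ASID per region, one byte each. */
    constexpr uint32_t MakeHsAsidValue(const uint8_t (&asids)[TableCount]) {
        uint32_t value = (1u << 31);
        for (size_t i = 0; i < TableCount; ++i) {
            value |= static_cast<uint32_t>(asids[i]) << (i * 8);
        }
        return value;
    }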
@ -839,8 +839,8 @@ namespace ams::kern::board::nintendo::nx {
|
||||||
KScopedLightLock lk(g_lock);
|
KScopedLightLock lk(g_lock);
|
||||||
for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) {
|
for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) {
|
||||||
const auto device_name = static_cast<ams::svc::DeviceName>(i);
|
const auto device_name = static_cast<ams::svc::DeviceName>(i);
|
||||||
if ((this->attached_device & (1ul << device_name)) != 0) {
|
if ((m_attached_device & (1ul << device_name)) != 0) {
|
||||||
WriteMcRegister(GetDeviceAsidRegisterOffset(device_name), IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value);
|
WriteMcRegister(GetDeviceAsidRegisterOffset(device_name), IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value);
|
||||||
SmmuSynchronizationBarrier();
|
SmmuSynchronizationBarrier();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -851,12 +851,12 @@ namespace ams::kern::board::nintendo::nx {
|
||||||
|
|
||||||
/* Release all asids. */
|
/* Release all asids. */
|
||||||
for (size_t i = 0; i < TableCount; ++i) {
|
for (size_t i = 0; i < TableCount; ++i) {
|
||||||
if (this->table_asids[i] != g_reserved_asid) {
|
if (m_table_asids[i] != g_reserved_asid) {
|
||||||
/* Set the table to the reserved table. */
|
/* Set the table to the reserved table. */
|
||||||
SetTable(this->table_asids[i], g_reserved_table_phys_addr);
|
SetTable(m_table_asids[i], g_reserved_table_phys_addr);
|
||||||
|
|
||||||
/* Close the table. */
|
/* Close the table. */
|
||||||
const KVirtualAddress table_vaddr = this->tables[i];
|
const KVirtualAddress table_vaddr = m_tables[i];
|
||||||
MESOSPHERE_ASSERT(ptm.GetRefCount(table_vaddr) == 1);
|
MESOSPHERE_ASSERT(ptm.GetRefCount(table_vaddr) == 1);
|
||||||
MESOSPHERE_ABORT_UNLESS(ptm.Close(table_vaddr, 1));
|
MESOSPHERE_ABORT_UNLESS(ptm.Close(table_vaddr, 1));
|
||||||
|
|
||||||
|
@ -864,7 +864,7 @@ namespace ams::kern::board::nintendo::nx {
|
||||||
ptm.Free(table_vaddr);
|
ptm.Free(table_vaddr);
|
||||||
|
|
||||||
/* Release the asid. */
|
/* Release the asid. */
|
||||||
g_asid_manager.Release(this->table_asids[i]);
|
g_asid_manager.Release(m_table_asids[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -875,7 +875,7 @@ namespace ams::kern::board::nintendo::nx {
|
||||||
R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound());
|
R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound());
|
||||||
|
|
||||||
/* Check that the device isn't already attached. */
|
/* Check that the device isn't already attached. */
|
||||||
R_UNLESS((this->attached_device & (1ul << device_name)) == 0, svc::ResultBusy());
|
R_UNLESS((m_attached_device & (1ul << device_name)) == 0, svc::ResultBusy());
|
||||||
|
|
||||||
/* Validate that the space is allowed for the device. */
|
/* Validate that the space is allowed for the device. */
|
||||||
const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize;
|
const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize;
|
||||||
|
@ -889,8 +889,8 @@ namespace ams::kern::board::nintendo::nx {
|
||||||
R_UNLESS(reg_offset >= 0, svc::ResultNotFound());
|
R_UNLESS(reg_offset >= 0, svc::ResultNotFound());
|
||||||
|
|
||||||
/* Determine the old/new values. */
|
/* Determine the old/new values. */
|
||||||
const u32 old_val = IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value;
|
const u32 old_val = IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value;
|
||||||
const u32 new_val = IsHsSupported(device_name) ? this->hs_attached_value : this->attached_value;
|
const u32 new_val = IsHsSupported(device_name) ? m_hs_attached_value : m_attached_value;
|
||||||
|
|
||||||
/* Attach the device. */
|
/* Attach the device. */
|
||||||
{
|
{
|
||||||
|
@ -912,7 +912,7 @@ namespace ams::kern::board::nintendo::nx {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Mark the device as attached. */
|
/* Mark the device as attached. */
|
||||||
this->attached_device |= (1ul << device_name);
|
m_attached_device |= (1ul << device_name);
|
||||||
|
|
||||||
return ResultSuccess();
|
return ResultSuccess();
|
||||||
}
|
}
|
||||||
|

@@ -923,15 +923,15 @@ namespace ams::kern::board::nintendo::nx {

R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound());

/* Check that the device is already attached. */
R_UNLESS((this->attached_device & (1ul << device_name)) != 0, svc::ResultInvalidState());
R_UNLESS((m_attached_device & (1ul << device_name)) != 0, svc::ResultInvalidState());

/* Get the device asid register offset. */
const int reg_offset = GetDeviceAsidRegisterOffset(device_name);
R_UNLESS(reg_offset >= 0, svc::ResultNotFound());

/* Determine the old/new values. */
const u32 old_val = IsHsSupported(device_name) ? this->hs_attached_value : this->attached_value;
const u32 old_val = IsHsSupported(device_name) ? m_hs_attached_value : m_attached_value;
const u32 new_val = IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value;
const u32 new_val = IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value;

/* When not building for debug, the old value might be unused. */
AMS_UNUSED(old_val);

@@ -952,7 +952,7 @@ namespace ams::kern::board::nintendo::nx {

}

/* Mark the device as detached. */
this->attached_device &= ~(1ul << device_name);
m_attached_device &= ~(1ul << device_name);

return ResultSuccess();
}
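
Aside: the attach and detach paths above are, at their core, guarded bitmask updates: a second attach (or a detach of something never attached) is rejected before any hardware register is touched, and the device bit is flipped only after the register writes succeed. A minimal standalone sketch of that pattern, with hypothetical names rather than the kernel's actual types:

#include <cstdint>

/* Hypothetical stand-in for the attach bookkeeping shown above: one bit per */
/* device, set only after the hardware has been programmed successfully.     */
class DeviceAttachMask {
    public:
        bool Attach(int device_name) {
            const uint64_t bit = (1ull << device_name);
            if ((m_attached_device & bit) != 0) {
                return false; /* Already attached; the kernel returns svc::ResultBusy(). */
            }
            /* ... program the device's asid register here ... */
            m_attached_device |= bit;
            return true;
        }
        bool Detach(int device_name) {
            const uint64_t bit = (1ull << device_name);
            if ((m_attached_device & bit) == 0) {
                return false; /* Not attached; the kernel returns svc::ResultInvalidState(). */
            }
            /* ... restore the detached register value here ... */
            m_attached_device &= ~bit;
            return true;
        }
    private:
        uint64_t m_attached_device = 0;
};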

@@ -968,7 +968,7 @@ namespace ams::kern::board::nintendo::nx {

const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize;
const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;

const PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(this->tables[l0_index]);
const PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
if (l1 == nullptr || !l1[l1_index].IsValid()) {
const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);

@@ -1023,7 +1023,7 @@ namespace ams::kern::board::nintendo::nx {

const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;

/* Get and validate l1. */
PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(this->tables[l0_index]);
PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
MESOSPHERE_ASSERT(l1 != nullptr);

/* Setup an l1 table/entry, if needed. */

@@ -1039,7 +1039,7 @@ namespace ams::kern::board::nintendo::nx {

/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(this->table_asids[l0_index], address);
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();

/* Open references to the pages. */

@@ -1066,7 +1066,7 @@ namespace ams::kern::board::nintendo::nx {

/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(this->table_asids[l0_index], address);
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();

/* Increment the page table count. */

@@ -1100,7 +1100,7 @@ namespace ams::kern::board::nintendo::nx {

}

/* Synchronize. */
InvalidateTlbSection(this->table_asids[l0_index], address);
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();

/* Open references to the pages. */
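
The map/unmap paths repeat one maintenance sequence after every table update: flush the page table cache entry for the modified PDE, flush the TLB section for that table's asid, then barrier. A standalone sketch of the ordering, with stubs standing in for the kernel's MMIO primitives (the helper itself is hypothetical):

#include <cstdint>

/* Stubs standing in for the SMMU register writes the kernel performs. */
void InvalidatePtc(uint64_t entry_phys) { /* write the PTC flush register */ }
void InvalidateTlbSection(uint8_t asid, uint64_t address) { /* write the TLB flush register */ }
void SmmuSynchronizationBarrier() { /* read back a config register to drain writes */ }

/* The sequence above: page-table cache first, then the TLB section, then a
   barrier so the SMMU observes both before the caller proceeds. */
void SynchronizeAfterTableUpdate(uint64_t entry_phys, uint8_t asid, uint64_t address) {
    InvalidatePtc(entry_phys);
    InvalidateTlbSection(asid, address);
    SmmuSynchronizationBarrier();
}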

@@ -1181,7 +1181,7 @@ namespace ams::kern::board::nintendo::nx {

const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;

/* Get and validate l1. */
PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(this->tables[l0_index]);
PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);

/* Check if there's nothing mapped at l1. */
if (l1 == nullptr || !l1[l1_index].IsValid()) {

@@ -1242,7 +1242,7 @@ namespace ams::kern::board::nintendo::nx {

/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(this->table_asids[l0_index], address);
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();

/* We invalidated the tlb. */

@@ -1254,7 +1254,7 @@ namespace ams::kern::board::nintendo::nx {

/* Invalidate the tlb if we haven't already. */
if (!invalidated_tlb) {
InvalidateTlbSection(this->table_asids[l0_index], address);
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();
}

@@ -1275,7 +1275,7 @@ namespace ams::kern::board::nintendo::nx {

/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(this->table_asids[l0_index], address);
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();

/* Close references. */

@@ -1305,7 +1305,7 @@ namespace ams::kern::board::nintendo::nx {

const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;

/* Get and validate l1. */
const PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(this->tables[l0_index]);
const PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
R_UNLESS(l1 != nullptr, svc::ResultInvalidCurrentMemory());
R_UNLESS(l1[l1_index].IsValid(), svc::ResultInvalidCurrentMemory());

@@ -51,15 +51,15 @@ namespace ams::kern {

{
KScopedSchedulerLock sl;

auto it = this->tree.nfind_light({ addr, -1 });
auto it = m_tree.nfind_light({ addr, -1 });
while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());

AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();

it = this->tree.erase(it);
it = m_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
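
For orientation, the loop above walks the arbiter's intrusive tree from the first waiter whose key equals addr, waking and unlinking waiters until count is exhausted (count <= 0 wakes every waiter). A standalone approximation, using std::multimap in place of the kernel's intrusive red-black tree (names hypothetical):

#include <cstdint>
#include <map>

struct Waiter { /* stand-in for KThread */ };

int SignalAddress(std::multimap<uintptr_t, Waiter *> &tree, uintptr_t addr, int count) {
    int num_waiters = 0;
    /* nfind_light({ addr, -1 }) ~ the first entry with this key. */
    auto it = tree.lower_bound(addr);
    while (it != tree.end() && (count <= 0 || num_waiters < count) && it->first == addr) {
        /* The kernel wakes the thread here, then unlinks it from the tree. */
        it = tree.erase(it);
        ++num_waiters;
    }
    return num_waiters;
}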

@@ -78,15 +78,15 @@ namespace ams::kern {

R_UNLESS(UpdateIfEqual(std::addressof(user_value), addr, value, value + 1), svc::ResultInvalidCurrentMemory());
R_UNLESS(user_value == value, svc::ResultInvalidState());

auto it = this->tree.nfind_light({ addr, -1 });
auto it = m_tree.nfind_light({ addr, -1 });
while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());

AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();

it = this->tree.erase(it);
it = m_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}

@@ -100,21 +100,21 @@ namespace ams::kern {

{
KScopedSchedulerLock sl;

auto it = this->tree.nfind_light({ addr, -1 });
auto it = m_tree.nfind_light({ addr, -1 });
/* Determine the updated value. */
s32 new_value;
if (GetTargetFirmware() >= TargetFirmware_7_0_0) {
if (count <= 0) {
if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) {
if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
new_value = value - 2;
} else {
new_value = value + 1;
}
} else {
if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) {
if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
auto tmp_it = it;
s32 tmp_num_waiters = 0;
while ((++tmp_it != this->tree.end()) && (tmp_it->GetAddressArbiterKey() == addr)) {
while ((++tmp_it != m_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr)) {
if ((tmp_num_waiters++) >= count) {
break;
}

@@ -131,7 +131,7 @@ namespace ams::kern {

}
} else {
if (count <= 0) {
if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) {
if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
new_value = value - 1;
} else {
new_value = value + 1;

@@ -139,7 +139,7 @@ namespace ams::kern {

} else {
auto tmp_it = it;
s32 tmp_num_waiters = 0;
while ((tmp_it != this->tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) && (tmp_num_waiters < count + 1)) {
while ((tmp_it != m_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) && (tmp_num_waiters < count + 1)) {
++tmp_num_waiters;
++tmp_it;
}
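
The branchy section above chooses the value written back to userspace. On firmware >= 7.0.0 with count <= 0, for instance, the arbiter writes value - 2 when at least one waiter is queued at addr and value + 1 otherwise, so callers can tell whether anyone was waiting. A sketch of just that visible branch (the count > 0 arm is partially elided by the diff, so it is not modeled here):

/* Firmware >= 7.0.0, count <= 0: signal-and-modify's updated value depends
   only on whether any waiter is queued at the address. */
inline int DetermineNewValueUncapped(int value, bool has_waiter_at_addr) {
    return has_waiter_at_addr ? (value - 2) : (value + 1);
}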

@@ -166,14 +166,14 @@ namespace ams::kern {

R_UNLESS(succeeded, svc::ResultInvalidCurrentMemory());
R_UNLESS(user_value == value, svc::ResultInvalidState());

while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());

AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();

it = this->tree.erase(it);
it = m_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}

@@ -225,8 +225,8 @@ namespace ams::kern {

}

/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(this->tree), addr);
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
this->tree.insert(*cur_thread);
m_tree.insert(*cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);
}

@@ -240,7 +240,7 @@ namespace ams::kern {

KScopedSchedulerLock sl;

if (cur_thread->IsWaitingForAddressArbiter()) {
this->tree.erase(this->tree.iterator_to(*cur_thread));
m_tree.erase(m_tree.iterator_to(*cur_thread));
cur_thread->ClearAddressArbiter();
}
}

@@ -287,8 +287,8 @@ namespace ams::kern {

}

/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(this->tree), addr);
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
this->tree.insert(*cur_thread);
m_tree.insert(*cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);
}

@@ -302,7 +302,7 @@ namespace ams::kern {

KScopedSchedulerLock sl;

if (cur_thread->IsWaitingForAddressArbiter()) {
this->tree.erase(this->tree.iterator_to(*cur_thread));
m_tree.erase(m_tree.iterator_to(*cur_thread));
cur_thread->ClearAddressArbiter();
}
}
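
The two wait paths above share a shape: under the scheduler lock, record which tree the thread waits on, insert it, and mark it Waiting; the cancel path removes the thread only if it is still queued. A standalone approximation, with std::multimap and a flag standing in for the intrusive tree and the thread state (names hypothetical):

#include <cstdint>
#include <map>

struct WaitEntry { uintptr_t key = 0; bool waiting = false; };

/* ~ SetAddressArbiter + m_tree.insert + SetState(ThreadState_Waiting). */
void BeginWait(std::multimap<uintptr_t, WaitEntry *> &tree, WaitEntry *w, uintptr_t addr) {
    w->key = addr;
    w->waiting = true;
    tree.emplace(addr, w);
}

/* ~ the timeout path: erase ourselves if still queued, then clear the record. */
void CancelWait(std::multimap<uintptr_t, WaitEntry *> &tree, WaitEntry *w) {
    if (!w->waiting) {
        return;
    }
    auto range = tree.equal_range(w->key);
    for (auto it = range.first; it != range.second; ++it) {
        if (it->second == w) {
            tree.erase(it);
            break;
        }
    }
    w->waiting = false;
}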

@@ -18,7 +18,7 @@

namespace ams::kern {

KAutoObject *KAutoObject::Create(KAutoObject *obj) {
obj->ref_count = 1;
obj->m_ref_count = 1;
return obj;
}
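
Create is the one place the reference count is seeded: a freshly created object starts with exactly one reference, owned by the caller. The same invariant in a standalone sketch (types hypothetical):

#include <atomic>

struct RefCounted {
    std::atomic<int> m_ref_count{0};
};

/* A freshly created object carries one strong reference for its creator. */
RefCounted *Create(RefCounted *obj) {
    obj->m_ref_count = 1;
    return obj;
}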

@@ -21,27 +21,27 @@ namespace ams::kern {

void KAutoObjectWithListContainer::Register(KAutoObjectWithList *obj) {
MESOSPHERE_ASSERT_THIS();

KScopedLightLock lk(this->lock);
KScopedLightLock lk(m_lock);

this->object_list.insert(*obj);
m_object_list.insert(*obj);
}

void KAutoObjectWithListContainer::Unregister(KAutoObjectWithList *obj) {
MESOSPHERE_ASSERT_THIS();

KScopedLightLock lk(this->lock);
KScopedLightLock lk(m_lock);

this->object_list.erase(this->object_list.iterator_to(*obj));
m_object_list.erase(m_object_list.iterator_to(*obj));
}

size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess *owner) {
MESOSPHERE_ASSERT_THIS();

KScopedLightLock lk(this->lock);
KScopedLightLock lk(m_lock);

size_t count = 0;

for (auto &obj : this->object_list) {
for (auto &obj : m_object_list) {
if (obj.GetOwner() == owner) {
count++;
}
}
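
All three container operations follow the same lock-then-touch-the-list discipline. GetOwnedCount, for example, is a straight counting pass; here is the same shape standalone, with std::mutex and std::list standing in for KLightLock and the intrusive list:

#include <cstddef>
#include <list>
#include <mutex>

struct Obj { const void *owner; };

size_t GetOwnedCount(std::mutex &lock, const std::list<Obj> &objects, const void *owner) {
    std::scoped_lock lk(lock); /* ~ KScopedLightLock lk(m_lock); */
    size_t count = 0;
    for (const auto &obj : objects) {
        if (obj.owner == owner) {
            ++count;
        }
    }
    return count;
}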

@@ -22,15 +22,15 @@ namespace ams::kern {

/* Most fields have already been cleared by our constructor. */

/* Initial processes may run on all cores. */
this->core_mask = (1ul << cpu::NumCores) - 1;
m_core_mask = (1ul << cpu::NumCores) - 1;

/* Initial processes may use any user priority they like. */
this->priority_mask = ~0xFul;
m_priority_mask = ~0xFul;

/* Here, Nintendo sets the kernel version to the current kernel version. */
/* We will follow suit and set the version to the highest supported kernel version. */
this->intended_kernel_version.Set<KernelVersion::MajorVersion>(ams::svc::SupportedKernelMajorVersion);
m_intended_kernel_version.Set<KernelVersion::MajorVersion>(ams::svc::SupportedKernelMajorVersion);
this->intended_kernel_version.Set<KernelVersion::MinorVersion>(ams::svc::SupportedKernelMinorVersion);
m_intended_kernel_version.Set<KernelVersion::MinorVersion>(ams::svc::SupportedKernelMinorVersion);

/* Parse the capabilities array. */
return this->SetCapabilities(caps, num_caps, page_table);

@@ -46,8 +46,8 @@ namespace ams::kern {

Result KCapabilities::SetCorePriorityCapability(const util::BitPack32 cap) {
/* We can't set core/priority if we've already set them. */
R_UNLESS(this->core_mask == 0, svc::ResultInvalidArgument());
R_UNLESS(m_core_mask == 0, svc::ResultInvalidArgument());
R_UNLESS(this->priority_mask == 0, svc::ResultInvalidArgument());
R_UNLESS(m_priority_mask == 0, svc::ResultInvalidArgument());

/* Validate the core/priority. */
const auto min_core = cap.Get<CorePriority::MinimumCoreId>();

@@ -64,18 +64,18 @@ namespace ams::kern {

/* Set core mask. */
for (auto core_id = min_core; core_id <= max_core; core_id++) {
this->core_mask |= (1ul << core_id);
m_core_mask |= (1ul << core_id);
}
MESOSPHERE_ASSERT((this->core_mask & ((1ul << cpu::NumCores) - 1)) == this->core_mask);
MESOSPHERE_ASSERT((m_core_mask & ((1ul << cpu::NumCores) - 1)) == m_core_mask);

/* Set priority mask. */
for (auto prio = min_prio; prio <= max_prio; prio++) {
this->priority_mask |= (1ul << prio);
m_priority_mask |= (1ul << prio);
}

/* We must have some core/priority we can use. */
R_UNLESS(this->core_mask != 0, svc::ResultInvalidArgument());
R_UNLESS(m_core_mask != 0, svc::ResultInvalidArgument());
R_UNLESS(this->priority_mask != 0, svc::ResultInvalidArgument());
R_UNLESS(m_priority_mask != 0, svc::ResultInvalidArgument());

return ResultSuccess();
}
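
Concretely, the two loops above build inclusive bit ranges: min_core = 0, max_core = 3 yields a core mask of 0xF, and the assert then checks that no bit falls outside the machine's cores. The helper below reproduces the computation standalone (hypothetical name):

#include <cstdint>

/* MakeMask(0, 3) == 0xF; MakeMask(2, 2) == 0x4. */
constexpr uint64_t MakeMask(int min, int max) {
    uint64_t mask = 0;
    for (int i = min; i <= max; ++i) {
        mask |= (1ull << i);
    }
    return mask;
}

static_assert(MakeMask(0, 3) == 0xF);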

@@ -186,17 +186,17 @@ namespace ams::kern {

/* Validate. */
R_UNLESS(cap.Get<ProgramType::Reserved>() == 0, svc::ResultReservedUsed());

this->program_type = cap.Get<ProgramType::Type>();
m_program_type = cap.Get<ProgramType::Type>();
return ResultSuccess();
}

Result KCapabilities::SetKernelVersionCapability(const util::BitPack32 cap) {
/* Ensure we haven't set our version before. */
R_UNLESS(this->intended_kernel_version.Get<KernelVersion::MajorVersion>() == 0, svc::ResultInvalidArgument());
R_UNLESS(m_intended_kernel_version.Get<KernelVersion::MajorVersion>() == 0, svc::ResultInvalidArgument());

/* Set, ensure that we set a valid version. */
this->intended_kernel_version = cap;
m_intended_kernel_version = cap;
R_UNLESS(this->intended_kernel_version.Get<KernelVersion::MajorVersion>() != 0, svc::ResultInvalidArgument());
R_UNLESS(m_intended_kernel_version.Get<KernelVersion::MajorVersion>() != 0, svc::ResultInvalidArgument());

return ResultSuccess();
}

@@ -205,7 +205,7 @@ namespace ams::kern {

/* Validate. */
R_UNLESS(cap.Get<HandleTable::Reserved>() == 0, svc::ResultReservedUsed());

this->handle_table_size = cap.Get<HandleTable::Size>();
m_handle_table_size = cap.Get<HandleTable::Size>();
return ResultSuccess();
}

@@ -213,8 +213,8 @@ namespace ams::kern {

/* Validate. */
R_UNLESS(cap.Get<DebugFlags::Reserved>() == 0, svc::ResultReservedUsed());

this->debug_capabilities.Set<DebugFlags::AllowDebug>(cap.Get<DebugFlags::AllowDebug>());
m_debug_capabilities.Set<DebugFlags::AllowDebug>(cap.Get<DebugFlags::AllowDebug>());
this->debug_capabilities.Set<DebugFlags::ForceDebug>(cap.Get<DebugFlags::ForceDebug>());
m_debug_capabilities.Set<DebugFlags::ForceDebug>(cap.Get<DebugFlags::ForceDebug>());
return ResultSuccess();
}

@@ -19,17 +19,17 @@ namespace ams::kern {

void KClientPort::Initialize(KPort *parent, s32 max_sessions) {
/* Set member variables. */
this->num_sessions = 0;
m_num_sessions = 0;
this->peak_sessions = 0;
m_peak_sessions = 0;
this->parent = parent;
m_parent = parent;
this->max_sessions = max_sessions;
m_max_sessions = max_sessions;
}

void KClientPort::OnSessionFinalized() {
KScopedSchedulerLock sl;

const auto prev = this->num_sessions--;
const auto prev = m_num_sessions--;
if (prev == this->max_sessions) {
if (prev == m_max_sessions) {
this->NotifyAvailable();
}
}

@@ -44,15 +44,15 @@ namespace ams::kern {

void KClientPort::Destroy() {
/* Note with our parent that we're closed. */
this->parent->OnClientClosed();
m_parent->OnClientClosed();

/* Close our reference to our parent. */
this->parent->Close();
m_parent->Close();
}

bool KClientPort::IsSignaled() const {
MESOSPHERE_ASSERT_THIS();
return this->num_sessions < this->max_sessions;
return m_num_sessions < m_max_sessions;
}

Result KClientPort::CreateSession(KClientSession **out) {

@@ -67,23 +67,23 @@ namespace ams::kern {

/* Atomically increment the number of sessions. */
s32 new_sessions;
{
const auto max = this->max_sessions;
const auto max = m_max_sessions;
auto cur_sessions = this->num_sessions.load(std::memory_order_acquire);
auto cur_sessions = m_num_sessions.load(std::memory_order_acquire);
do {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1;
} while (!this->num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed));
} while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed));

}

/* Atomically update the peak session tracking. */
{
auto peak = this->peak_sessions.load(std::memory_order_acquire);
auto peak = m_peak_sessions.load(std::memory_order_acquire);
do {
if (peak >= new_sessions) {
break;
}
} while (!this->peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
} while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
}
}
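
The session-count block above is the classic lock-free "increment if below limit" idiom: load, bail out if at max, otherwise publish cur + 1 with compare_exchange_weak, which reloads the current value on failure. The peak update is the same loop with an "only move upward" condition. A standalone sketch of the first half (hypothetical helper):

#include <atomic>
#include <optional>

/* Returns the new count on success, or std::nullopt when the counter is
   already at max (the kernel returns svc::ResultOutOfSessions() there). */
std::optional<int> TryIncrementBelow(std::atomic<int> &counter, int max) {
    int cur = counter.load(std::memory_order_acquire);
    int next;
    do {
        if (cur >= max) {
            return std::nullopt;
        }
        next = cur + 1;
    } while (!counter.compare_exchange_weak(cur, next, std::memory_order_relaxed));
    return next;
}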

@@ -91,8 +91,8 @@ namespace ams::kern {

KSession *session = KSession::Create();
if (session == nullptr) {
/* Decrement the session count. */
const auto prev = this->num_sessions--;
const auto prev = m_num_sessions--;
if (prev == this->max_sessions) {
if (prev == m_max_sessions) {
this->NotifyAvailable();
}

@@ -100,7 +100,7 @@ namespace ams::kern {

}

/* Initialize the session. */
session->Initialize(this, this->parent->GetName());
session->Initialize(this, m_parent->GetName());

/* Commit the session reservation. */
session_reservation.Commit();

@@ -113,7 +113,7 @@ namespace ams::kern {

};

/* Enqueue the session with our parent. */
R_TRY(this->parent->EnqueueSession(std::addressof(session->GetServerSession())));
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));

/* We succeeded, so set the output. */
session_guard.Cancel();

@@ -133,23 +133,23 @@ namespace ams::kern {

/* Atomically increment the number of sessions. */
s32 new_sessions;
{
const auto max = this->max_sessions;
const auto max = m_max_sessions;
auto cur_sessions = this->num_sessions.load(std::memory_order_acquire);
auto cur_sessions = m_num_sessions.load(std::memory_order_acquire);
do {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1;
} while (!this->num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed));
} while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed));

}

/* Atomically update the peak session tracking. */
{
auto peak = this->peak_sessions.load(std::memory_order_acquire);
auto peak = m_peak_sessions.load(std::memory_order_acquire);
do {
if (peak >= new_sessions) {
break;
}
} while (!this->peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
} while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
}
}

@@ -157,8 +157,8 @@ namespace ams::kern {

KLightSession *session = KLightSession::Create();
if (session == nullptr) {
/* Decrement the session count. */
const auto prev = this->num_sessions--;
const auto prev = m_num_sessions--;
if (prev == this->max_sessions) {
if (prev == m_max_sessions) {
this->NotifyAvailable();
}

@@ -166,7 +166,7 @@ namespace ams::kern {

}

/* Initialize the session. */
session->Initialize(this, this->parent->GetName());
session->Initialize(this, m_parent->GetName());

/* Commit the session reservation. */
session_reservation.Commit();

@@ -179,7 +179,7 @@ namespace ams::kern {

};

/* Enqueue the session with our parent. */
R_TRY(this->parent->EnqueueSession(std::addressof(session->GetServerSession())));
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));

/* We succeeded, so set the output. */
session_guard.Cancel();
|
@ -20,8 +20,8 @@ namespace ams::kern {
|
||||||
void KClientSession::Destroy() {
|
void KClientSession::Destroy() {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
|
|
||||||
this->parent->OnClientClosed();
|
m_parent->OnClientClosed();
|
||||||
this->parent->Close();
|
m_parent->Close();
|
||||||
}
|
}
|
||||||
|
|
||||||
void KClientSession::OnServerClosed() {
|
void KClientSession::OnServerClosed() {
|
||||||
|
@ -45,7 +45,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
GetCurrentThread().SetSyncedObject(nullptr, ResultSuccess());
|
GetCurrentThread().SetSyncedObject(nullptr, ResultSuccess());
|
||||||
|
|
||||||
R_TRY(this->parent->OnRequest(request));
|
R_TRY(m_parent->OnRequest(request));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Get the result. */
|
/* Get the result. */
|
||||||
|
@ -68,7 +68,7 @@ namespace ams::kern {
|
||||||
{
|
{
|
||||||
KScopedSchedulerLock sl;
|
KScopedSchedulerLock sl;
|
||||||
|
|
||||||
R_TRY(this->parent->OnRequest(request));
|
R_TRY(m_parent->OnRequest(request));
|
||||||
}
|
}
|
||||||
|
|
||||||
return ResultSuccess();
|
return ResultSuccess();
|
||||||
|
|
|
@ -21,31 +21,31 @@ namespace ams::kern {
|
||||||
MESOSPHERE_ASSERT_THIS();
|
MESOSPHERE_ASSERT_THIS();
|
||||||
|
|
||||||
/* Set members. */
|
/* Set members. */
|
||||||
this->owner = GetCurrentProcessPointer();
|
m_owner = GetCurrentProcessPointer();
|
||||||
|
|
||||||
/* Initialize the page group. */
|
/* Initialize the page group. */
|
||||||
auto &page_table = this->owner->GetPageTable();
|
auto &page_table = m_owner->GetPageTable();
|
||||||
new (GetPointer(this->page_group)) KPageGroup(page_table.GetBlockInfoManager());
|
new (GetPointer(m_page_group)) KPageGroup(page_table.GetBlockInfoManager());
|
||||||
|
|
||||||
/* Ensure that our page group's state is valid on exit. */
|
/* Ensure that our page group's state is valid on exit. */
|
||||||
auto pg_guard = SCOPE_GUARD { GetReference(this->page_group).~KPageGroup(); };
|
auto pg_guard = SCOPE_GUARD { GetReference(m_page_group).~KPageGroup(); };
|
||||||
|
|
||||||
/* Lock the memory. */
|
/* Lock the memory. */
|
||||||
R_TRY(page_table.LockForCodeMemory(GetPointer(this->page_group), addr, size));
|
R_TRY(page_table.LockForCodeMemory(GetPointer(m_page_group), addr, size));
|
||||||
|
|
||||||
/* Clear the memory. */
|
/* Clear the memory. */
|
||||||
for (const auto &block : GetReference(this->page_group)) {
|
for (const auto &block : GetReference(m_page_group)) {
|
||||||
/* Clear and store cache. */
|
/* Clear and store cache. */
|
||||||
std::memset(GetVoidPointer(block.GetAddress()), 0xFF, block.GetSize());
|
std::memset(GetVoidPointer(block.GetAddress()), 0xFF, block.GetSize());
|
||||||
cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize());
|
cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize());
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Set remaining tracking members. */
|
/* Set remaining tracking members. */
|
||||||
this->owner->Open();
|
m_owner->Open();
|
||||||
this->address = addr;
|
m_address = addr;
|
||||||
this->is_initialized = true;
|
m_is_initialized = true;
|
||||||
this->is_owner_mapped = false;
|
m_is_owner_mapped = false;
|
||||||
this->is_mapped = false;
|
m_is_mapped = false;
|
||||||
|
|
||||||
/* We succeeded. */
|
/* We succeeded. */
|
||||||
pg_guard.Cancel();
|
pg_guard.Cancel();
|
||||||
|
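
The pg_guard above is the scope-guard idiom: destroy the freshly constructed page group on any early return, but Cancel() once initialization has fully succeeded. A minimal standalone guard in the same spirit, using the commit's m_ naming (the class itself is hypothetical, not the kernel's SCOPE_GUARD implementation):

#include <utility>

template <typename F>
class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : m_f(std::move(f)) { /* ... */ }
        ~ScopeGuard() { if (m_active) { m_f(); } }
        void Cancel() { m_active = false; }
    private:
        F m_f;
        bool m_active = true;
};

/* Usage (hypothetical cleanup): auto pg_guard = ScopeGuard{[&] { DestroyPageGroup(); }};
   ... on success: pg_guard.Cancel(); */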

@@ -56,17 +56,17 @@ namespace ams::kern {

MESOSPHERE_ASSERT_THIS();

/* Unlock. */
if (!this->is_mapped && !this->is_owner_mapped) {
if (!m_is_mapped && !m_is_owner_mapped) {
const size_t size = GetReference(this->page_group).GetNumPages() * PageSize;
const size_t size = GetReference(m_page_group).GetNumPages() * PageSize;
MESOSPHERE_R_ABORT_UNLESS(this->owner->GetPageTable().UnlockForCodeMemory(this->address, size, GetReference(this->page_group)));
MESOSPHERE_R_ABORT_UNLESS(m_owner->GetPageTable().UnlockForCodeMemory(m_address, size, GetReference(m_page_group)));
}

/* Close the page group. */
GetReference(this->page_group).Close();
GetReference(m_page_group).Close();
GetReference(this->page_group).Finalize();
GetReference(m_page_group).Finalize();

/* Close our reference to our owner. */
this->owner->Close();
m_owner->Close();

/* Perform inherited finalization. */
KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList>::Finalize();

@@ -76,19 +76,19 @@ namespace ams::kern {

MESOSPHERE_ASSERT_THIS();

/* Validate the size. */
R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

/* Lock ourselves. */
KScopedLightLock lk(this->lock);
KScopedLightLock lk(m_lock);

/* Ensure we're not already mapped. */
R_UNLESS(!this->is_mapped, svc::ResultInvalidState());
R_UNLESS(!m_is_mapped, svc::ResultInvalidState());

/* Map the memory. */
R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut, KMemoryPermission_UserReadWrite));
R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(m_page_group), KMemoryState_CodeOut, KMemoryPermission_UserReadWrite));

/* Mark ourselves as mapped. */
this->is_mapped = true;
m_is_mapped = true;

return ResultSuccess();
}

@@ -97,17 +97,17 @@ namespace ams::kern {

MESOSPHERE_ASSERT_THIS();

/* Validate the size. */
R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

/* Lock ourselves. */
KScopedLightLock lk(this->lock);
KScopedLightLock lk(m_lock);

/* Unmap the memory. */
R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut));
R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), KMemoryState_CodeOut));

/* Mark ourselves as unmapped. */
MESOSPHERE_ASSERT(this->is_mapped);
MESOSPHERE_ASSERT(m_is_mapped);
this->is_mapped = false;
m_is_mapped = false;

return ResultSuccess();
}

@@ -116,13 +116,13 @@ namespace ams::kern {

MESOSPHERE_ASSERT_THIS();

/* Validate the size. */
R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

/* Lock ourselves. */
KScopedLightLock lk(this->lock);
KScopedLightLock lk(m_lock);

/* Ensure we're not already mapped. */
R_UNLESS(!this->is_owner_mapped, svc::ResultInvalidState());
R_UNLESS(!m_is_owner_mapped, svc::ResultInvalidState());

/* Convert the memory permission. */
KMemoryPermission k_perm;
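
Each map/unmap entry point begins with the same size check: the page group must cover exactly util::DivideUp(size, PageSize) pages. DivideUp is ordinary ceiling division:

#include <cstddef>

/* DivideUp(size, PageSize): pages needed to cover size bytes; e.g.
   DivideUp(0x1001, 0x1000) == 2. */
constexpr size_t DivideUp(size_t value, size_t divisor) {
    return (value + divisor - 1) / divisor;
}

static_assert(DivideUp(0x1001, 0x1000) == 2);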

@@ -133,10 +133,10 @@ namespace ams::kern {

}

/* Map the memory. */
R_TRY(this->owner->GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode, k_perm));
R_TRY(m_owner->GetPageTable().MapPageGroup(address, GetReference(m_page_group), KMemoryState_GeneratedCode, k_perm));

/* Mark ourselves as mapped. */
this->is_owner_mapped = true;
m_is_owner_mapped = true;

return ResultSuccess();
}

@@ -145,17 +145,17 @@ namespace ams::kern {

MESOSPHERE_ASSERT_THIS();

/* Validate the size. */
R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

/* Lock ourselves. */
KScopedLightLock lk(this->lock);
KScopedLightLock lk(m_lock);

/* Unmap the memory. */
R_TRY(this->owner->GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode));
R_TRY(m_owner->GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), KMemoryState_GeneratedCode));

/* Mark ourselves as unmapped. */
MESOSPHERE_ASSERT(this->is_owner_mapped);
MESOSPHERE_ASSERT(m_is_owner_mapped);
this->is_owner_mapped = false;
m_is_owner_mapped = false;

return ResultSuccess();
}

Some files were not shown because too many files have changed in this diff.