diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm/kern_generic_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm/kern_generic_interrupt_controller.hpp
index 267cc8ad0..5ad242a2f 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm/kern_generic_interrupt_controller.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm/kern_generic_interrupt_controller.hpp
@@ -135,10 +135,10 @@ namespace ams::kern::arch::arm {
         private:
             static inline u32 s_mask[cpu::NumCores];
         private:
-            volatile GicDistributor *gicd;
-            volatile GicCpuInterface *gicc;
+            volatile GicDistributor *m_gicd;
+            volatile GicCpuInterface *m_gicc;
         public:
-            constexpr KInterruptController() : gicd(nullptr), gicc(nullptr) { /* ... */ }
+            constexpr KInterruptController() : m_gicd(nullptr), m_gicc(nullptr) { /* ... */ }

             void Initialize(s32 core_id);
             void Finalize(s32 core_id);
@@ -149,7 +149,7 @@ namespace ams::kern::arch::arm {
             void RestoreGlobal(const GlobalState *state) const;
         public:
             u32 GetIrq() const {
-                return this->gicc->iar;
+                return m_gicc->iar;
             }

             static constexpr s32 ConvertRawIrq(u32 irq) {
@@ -157,69 +157,69 @@ namespace ams::kern::arch::arm {
             }

             void Enable(s32 irq) const {
-                this->gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+                m_gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
             }

             void Disable(s32 irq) const {
-                this->gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+                m_gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
             }

             void Clear(s32 irq) const {
-                this->gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+                m_gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
             }

             void SetTarget(s32 irq, s32 core_id) const {
-                this->gicd->itargetsr.bytes[irq] = this->gicd->itargetsr.bytes[irq] | GetGicMask(core_id);
+                m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] | GetGicMask(core_id);
             }

             void ClearTarget(s32 irq, s32 core_id) const {
-                this->gicd->itargetsr.bytes[irq] = this->gicd->itargetsr.bytes[irq] & ~GetGicMask(core_id);
+                m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] & ~GetGicMask(core_id);
             }

             void SetPriorityLevel(s32 irq, s32 level) const {
                 MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
-                this->gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
+                m_gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
             }

             s32 GetPriorityLevel(s32 irq) const {
-                return FromGicPriorityValue(this->gicd->ipriorityr.bytes[irq]);
+                return FromGicPriorityValue(m_gicd->ipriorityr.bytes[irq]);
             }

             void SetPriorityLevel(s32 level) const {
                 MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
-                this->gicc->pmr = ToGicPriorityValue(level);
+                m_gicc->pmr = ToGicPriorityValue(level);
             }

             void SetEdge(s32 irq) const {
-                u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
+                u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
                 cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
                 cfg |= (0x2 << (2 * (irq % (BITSIZEOF(u32) / 2))));
-                this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
+                m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
             }

             void SetLevel(s32 irq) const {
-                u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
+                u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
                 cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
                 cfg |= (0x0 << (2 * (irq % (BITSIZEOF(u32) / 2))));
-                this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
+                m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
             }

             void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
                 MESOSPHERE_ASSERT(IsSoftware(irq));
-                this->gicd->sgir = GetCpuTargetListMask(irq, core_mask);
+                m_gicd->sgir = GetCpuTargetListMask(irq, core_mask);
             }

             void SendInterProcessorInterrupt(s32 irq) {
                 MESOSPHERE_ASSERT(IsSoftware(irq));
-                this->gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
+                m_gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
             }

             void EndOfInterrupt(u32 irq) const {
-                this->gicc->eoir = irq;
+                m_gicc->eoir = irq;
             }

             bool IsInterruptDefined(s32 irq) const {
-                const s32 num_interrupts = std::min(32 + 32 * (this->gicd->typer & 0x1F), static_cast<s32>(NumInterrupts));
+                const s32 num_interrupts = std::min(32 + 32 * (m_gicd->typer & 0x1F), static_cast<s32>(NumInterrupts));
                 return (0 <= irq && irq < num_interrupts);
             }
         public:
@@ -270,7 +270,7 @@ namespace ams::kern::arch::arm {
             }

             ALWAYS_INLINE void SetGicMask(s32 core_id) const {
-                s_mask[core_id] = this->gicd->itargetsr.bytes[0];
+                s_mask[core_id] = m_gicd->itargetsr.bytes[0];
             }

             NOINLINE void SetupInterruptLines(s32 core_id) const;
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
index c3a700569..269858fb8 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
@@ -44,16 +44,16 @@ namespace ams::kern::arch::arm64::init {

             struct NoClear{};
         private:
-            KPhysicalAddress l1_table;
+            KPhysicalAddress m_l1_table;
         public:
-            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }
+            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : m_l1_table(l1) { /* ... */ }

             constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
-                ClearNewPageTable(this->l1_table);
+                ClearNewPageTable(m_l1_table);
             }

             constexpr ALWAYS_INLINE uintptr_t GetL1TableAddress() const {
-                return GetInteger(this->l1_table);
+                return GetInteger(m_l1_table);
             }
         private:
             static constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KPhysicalAddress _l1_table, KVirtualAddress address) {
@@ -83,7 +83,7 @@ namespace ams::kern::arch::arm64::init {
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 size_t count = 0;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
                     if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -137,7 +137,7 @@ namespace ams::kern::arch::arm64::init {
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 size_t count = 0;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
                     if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -194,7 +194,7 @@ namespace ams::kern::arch::arm64::init {
             }

             PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
-                L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                 if (l1_entry->IsBlock()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
@@ -301,7 +301,7 @@ namespace ams::kern::arch::arm64::init {

                 /* Iteratively map pages until the requested region is mapped. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* Can we make an L1 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
@@ -382,7 +382,7 @@ namespace ams::kern::arch::arm64::init {

             KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
                 /* Get the L1 entry. */
-                const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                const L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                 if (l1_entry->IsBlock()) {
                     return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
@@ -444,7 +444,7 @@ namespace ams::kern::arch::arm64::init {
                 };

                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped, update. */
                     if (l1_entry->IsBlock()) {
@@ -485,7 +485,7 @@ namespace ams::kern::arch::arm64::init {
                 const KVirtualAddress end_virt_addr = virt_addr + size;

                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* If an L1 block is mapped, the address isn't free. */
                     if (l1_entry->IsBlock()) {
@@ -534,7 +534,7 @@ namespace ams::kern::arch::arm64::init {

                 /* Iteratively reprotect pages until the requested region is reprotected. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);

                     /* Check if an L1 block is present. */
                     if (l1_entry->IsBlock()) {
@@ -680,43 +680,43 @@ namespace ams::kern::arch::arm64::init {
                     uintptr_t free_bitmap;
                 };
             private:
-                State state;
+                State m_state;
             public:
-                constexpr ALWAYS_INLINE KInitialPageAllocator() : state{} { /* ... */ }
+                constexpr ALWAYS_INLINE KInitialPageAllocator() : m_state{} { /* ... */ }

                 ALWAYS_INLINE void Initialize(uintptr_t address) {
-                    this->state.next_address = address + BITSIZEOF(this->state.free_bitmap) * PageSize;
-                    this->state.free_bitmap = ~uintptr_t();
+                    m_state.next_address = address + BITSIZEOF(m_state.free_bitmap) * PageSize;
+                    m_state.free_bitmap = ~uintptr_t();
                 }

                 ALWAYS_INLINE void InitializeFromState(uintptr_t state_val) {
                     if (kern::GetTargetFirmware() >= ams::TargetFirmware_10_0_0) {
-                        this->state = *reinterpret_cast<State *>(state_val);
+                        m_state = *reinterpret_cast<State *>(state_val);
                     } else {
-                        this->state.next_address = state_val;
-                        this->state.free_bitmap = 0;
+                        m_state.next_address = state_val;
+                        m_state.free_bitmap = 0;
                     }
                 }

                 ALWAYS_INLINE void GetFinalState(State *out) {
-                    *out = this->state;
-                    this->state = {};
+                    *out = m_state;
+                    m_state = {};
                 }
             public:
                 virtual KPhysicalAddress Allocate() override {
-                    MESOSPHERE_INIT_ABORT_UNLESS(this->state.next_address != Null<uintptr_t>);
-                    uintptr_t allocated = this->state.next_address;
-                    if (this->state.free_bitmap != 0) {
+                    MESOSPHERE_INIT_ABORT_UNLESS(m_state.next_address != Null<uintptr_t>);
+                    uintptr_t allocated = m_state.next_address;
+                    if (m_state.free_bitmap != 0) {
                         u64 index;
                         uintptr_t mask;
                         do {
-                            index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(this->state.free_bitmap) - 1);
+                            index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(m_state.free_bitmap) - 1);
                             mask = (static_cast<uintptr_t>(1) << index);
-                        } while ((this->state.free_bitmap & mask) == 0);
-                        this->state.free_bitmap &= ~mask;
-                        allocated = this->state.next_address - ((BITSIZEOF(this->state.free_bitmap) - index) * PageSize);
+                        } while ((m_state.free_bitmap & mask) == 0);
+                        m_state.free_bitmap &= ~mask;
+                        allocated = m_state.next_address - ((BITSIZEOF(m_state.free_bitmap) - index) * PageSize);
                     } else {
-                        this->state.next_address += PageSize;
+                        m_state.next_address += PageSize;
                     }

                     ClearPhysicalMemory(allocated, PageSize);
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp
index eb858242b..2d4410f84 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp
@@ -135,36 +135,36 @@ namespace ams::kern::arch::arm64::cpu {
             NON_COPYABLE(GenericRegisterAccessorBase);
             NON_MOVEABLE(GenericRegisterAccessorBase);
         private:
-            u64 value;
+            u64 m_value;
         public:
-            constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
+            constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : m_value(v) { /* ... */ }
         protected:
             constexpr ALWAYS_INLINE u64 GetValue() const {
-                return this->value;
+                return m_value;
             }

             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
-                return (this->value >> offset) & ((1ul << count) - 1);
+                return (m_value >> offset) & ((1ul << count) - 1);
             }

             constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->value &= ~mask;
-                this->value |= (value & (mask >> offset)) << offset;
+                m_value &= ~mask;
+                m_value |= (value & (mask >> offset)) << offset;
             }

             constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->value &= ~mask;
-                this->value |= (value & mask);
+                m_value &= ~mask;
+                m_value |= (value & mask);
             }

             constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
                 const u64 mask = 1ul << offset;
                 if (enabled) {
-                    this->value |= mask;
+                    m_value |= mask;
                 } else {
-                    this->value &= ~mask;
+                    m_value &= ~mask;
                 }
             }
     };
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp
index 2f805ca54..2a6c46084 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp
@@ -21,9 +21,9 @@ namespace ams::kern::arch::arm64 {

     class KHardwareTimer : public KInterruptTask, public KHardwareTimerBase {
         private:
-            s64 maximum_time;
+            s64 m_maximum_time;
         public:
-            constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
+            constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), m_maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
         public:
             /* Public API. */
             NOINLINE void Initialize();
@@ -38,7 +38,7 @@ namespace ams::kern::arch::arm64 {
                 KScopedSpinLock lk(this->GetLock());

                 if (this->RegisterAbsoluteTaskImpl(task, task_time)) {
-                    if (task_time <= this->maximum_time) {
+                    if (task_time <= m_maximum_time) {
                         SetCompareValue(task_time);
                         EnableInterrupt();
                     }
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp
index 50a25ad63..235ae00f2 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp
@@ -47,18 +47,18 @@ namespace ams::kern::arch::arm64 {
                 constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
             };
         private:
-            KCoreLocalInterruptEntry core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
-            KInterruptController interrupt_controller{};
-            KInterruptController::LocalState local_states[cpu::NumCores]{};
-            bool local_state_saved[cpu::NumCores]{};
-            mutable KSpinLock global_interrupt_lock{};
-            KGlobalInterruptEntry global_interrupts[KInterruptController::NumGlobalInterrupts]{};
-            KInterruptController::GlobalState global_state{};
-            bool global_state_saved{};
+            KCoreLocalInterruptEntry m_core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
+            KInterruptController m_interrupt_controller{};
+            KInterruptController::LocalState m_local_states[cpu::NumCores]{};
+            bool m_local_state_saved[cpu::NumCores]{};
+            mutable KSpinLock m_global_interrupt_lock{};
+            KGlobalInterruptEntry m_global_interrupts[KInterruptController::NumGlobalInterrupts]{};
+            KInterruptController::GlobalState m_global_state{};
+            bool m_global_state_saved{};
         private:
-            ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return this->global_interrupt_lock; }
-            ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return this->global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
-            ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return this->core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }
+            ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return m_global_interrupt_lock; }
+            ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return m_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
+            ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return m_core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }

             bool OnHandleInterrupt();
         public:
@@ -71,15 +71,15 @@ namespace ams::kern::arch::arm64 {
             NOINLINE void Restore(s32 core_id);

             bool IsInterruptDefined(s32 irq) const {
-                return this->interrupt_controller.IsInterruptDefined(irq);
+                return m_interrupt_controller.IsInterruptDefined(irq);
             }

             bool IsGlobal(s32 irq) const {
-                return this->interrupt_controller.IsGlobal(irq);
+                return m_interrupt_controller.IsGlobal(irq);
             }

             bool IsLocal(s32 irq) const {
-                return this->interrupt_controller.IsLocal(irq);
+                return m_interrupt_controller.IsLocal(irq);
             }

             NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
@@ -89,11 +89,11 @@ namespace ams::kern::arch::arm64 {
             NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);

             ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
-                this->interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
+                m_interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
             }

             ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
-                this->interrupt_controller.SendInterProcessorInterrupt(irq);
+                m_interrupt_controller.SendInterProcessorInterrupt(irq);
             }

             static void HandleInterrupt(bool user_mode);
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
index 81c388044..6cd5008b3 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
@@ -92,15 +92,15 @@ namespace ams::kern::arch::arm64 {
                 return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
             }
         private:
-            KPageTableManager *manager;
-            u64 ttbr;
-            u8 asid;
+            KPageTableManager *m_manager;
+            u64 m_ttbr;
+            u8 m_asid;
         protected:
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
             virtual void FinalizeUpdate(PageLinkedList *page_list) override;

-            KPageTableManager &GetPageTableManager() const { return *this->manager; }
+            KPageTableManager &GetPageTableManager() const { return *m_manager; }
         private:
             constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
                 /* Set basic attributes. */
@@ -166,13 +166,13 @@ namespace ams::kern::arch::arm64 {

                 return entry;
             }
         public:
-            constexpr KPageTable() : KPageTableBase(), manager(), ttbr(), asid() { /* ... */ }
+            constexpr KPageTable() : KPageTableBase(), m_manager(), m_ttbr(), m_asid() { /* ... */ }

             static NOINLINE void Initialize(s32 core_id);

             ALWAYS_INLINE void Activate(u32 proc_id) {
                 cpu::DataSynchronizationBarrier();
-                cpu::SwitchProcess(this->ttbr, proc_id);
+                cpu::SwitchProcess(m_ttbr, proc_id);
             }

             NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
@@ -225,7 +225,7 @@ namespace ams::kern::arch::arm64 {
             }

             void OnTableUpdated() const {
-                cpu::InvalidateTlbByAsid(this->asid);
+                cpu::InvalidateTlbByAsid(m_asid);
             }

             void OnKernelTableUpdated() const {
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
index 97ba26338..67e79074d 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
@@ -105,50 +105,50 @@ namespace ams::kern::arch::arm64 {
                 ContigType_Contiguous = (0x1ul << 52),
             };
         protected:
-            u64 attributes;
+            u64 m_attributes;
         public:
             /* Take in a raw attribute. */
-            constexpr explicit ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
-            constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry() : m_attributes() { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : m_attributes(attr) { /* ... */ }

-            constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : m_attributes(0) { /* ... */ }

             /* Extend a previous attribute. */
-            constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
+            constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : m_attributes(rhs.m_attributes | new_attr) { /* ... */ }

             /* Construct a new attribute. */
             constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
-                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
+                : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
             { /* ... */ }
         protected:
             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
-                return (this->attributes >> offset) & ((1ul << count) - 1);
+                return (m_attributes >> offset) & ((1ul << count) - 1);
             }

             constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
-                return this->attributes & (((1ul << count) - 1) << offset);
+                return m_attributes & (((1ul << count) - 1) << offset);
             }

             constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->attributes &= ~mask;
-                this->attributes |= (value & (mask >> offset)) << offset;
+                m_attributes &= ~mask;
+                m_attributes |= (value & (mask >> offset)) << offset;
             }

             constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
                 const u64 mask = ((1ul << count) - 1) << offset;
-                this->attributes &= ~mask;
-                this->attributes |= (value & mask);
+                m_attributes &= ~mask;
+                m_attributes |= (value & mask);
             }

             constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
                 const u64 mask = 1ul << offset;
                 if (enabled) {
-                    this->attributes |= mask;
+                    m_attributes |= mask;
                 } else {
-                    this->attributes &= ~mask;
+                    m_attributes &= ~mask;
                 }
             }
         public:
@@ -167,9 +167,9 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
-            constexpr ALWAYS_INLINE bool IsBlock() const { return (this->attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
-            constexpr ALWAYS_INLINE bool IsTable() const { return (this->attributes & ExtensionFlag_TestTableMask) == 2; }
-            constexpr ALWAYS_INLINE bool IsEmpty() const { return (this->attributes & ExtensionFlag_TestTableMask) == 0; }
+            constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
+            constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; }
+            constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; }
             constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }

             constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
@@ -185,21 +185,21 @@ namespace ams::kern::arch::arm64 {

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
                 constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
-                return this->attributes & BaseMask;
+                return m_attributes & BaseMask;
             }

             constexpr ALWAYS_INLINE bool IsForMerge(u64 attr) const {
                 constexpr u64 BaseMaskForMerge = ~static_cast<u64>(ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail);
-                return (this->attributes & BaseMaskForMerge) == attr;
+                return (m_attributes & BaseMaskForMerge) == attr;
             }

             constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
-                return this->attributes;
+                return m_attributes;
             }
         protected:
             constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
-                return this->attributes;
+                return m_attributes;
             }
     };
@@ -262,7 +262,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL2BlockMask(idx);
             }

             constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
@@ -322,7 +322,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL2BlockMask(idx);
             }

             static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
@@ -339,7 +339,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL3BlockMask(idx);
             }

             constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
@@ -376,7 +376,7 @@ namespace ams::kern::arch::arm64 {
             }

             constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
-                return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+                return m_attributes & GetEntryTemplateForL3BlockMask(idx);
             }

             constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
index b82db110d..155affe26 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
@@ -77,16 +77,16 @@ namespace ams::kern::arch::arm64 {
             ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
             ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
         private:
-            L1PageTableEntry *table;
-            bool is_kernel;
-            u32 num_entries;
+            L1PageTableEntry *m_table;
+            bool m_is_kernel;
+            u32 m_num_entries;
         public:
             ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) const {
                 return table + index * sizeof(PageTableEntry);
             }

             ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) const {
-                return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(this->table), GetL1Index(address) & (this->num_entries - 1)));
+                return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(m_table), GetL1Index(address) & (m_num_entries - 1)));
             }

             ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
@@ -105,7 +105,7 @@ namespace ams::kern::arch::arm64 {
                 return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
             }
         public:
-            constexpr KPageTableImpl() : table(), is_kernel(), num_entries() { /* ... */ }
+            constexpr KPageTableImpl() : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }

             NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
             NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
index ffeac1848..3bf4d6cee 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
@@ -21,274 +21,274 @@ namespace ams::kern::arch::arm64 {

     class KProcessPageTable {
         private:
-            KPageTable page_table;
+            KPageTable m_page_table;
         public:
-            constexpr KProcessPageTable() : page_table() { /* ... */ }
+            constexpr KProcessPageTable() : m_page_table() { /* ... */ }

             void Activate(u64 id) {
                 /* Activate the page table with the specified contextidr. */
-                this->page_table.Activate(id);
+                m_page_table.Activate(id);
             }

             Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
-                return this->page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
+                return m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
             }

-            void Finalize() { this->page_table.Finalize(); }
+            void Finalize() { m_page_table.Finalize(); }

             Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
-                return this->page_table.SetMemoryPermission(addr, size, perm);
+                return m_page_table.SetMemoryPermission(addr, size, perm);
             }

             Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
-                return this->page_table.SetProcessMemoryPermission(addr, size, perm);
+                return m_page_table.SetProcessMemoryPermission(addr, size, perm);
             }

             Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
-                return this->page_table.SetMemoryAttribute(addr, size, mask, attr);
+                return m_page_table.SetMemoryAttribute(addr, size, mask, attr);
             }

             Result SetHeapSize(KProcessAddress *out, size_t size) {
-                return this->page_table.SetHeapSize(out, size);
+                return m_page_table.SetHeapSize(out, size);
             }

             Result SetMaxHeapSize(size_t size) {
-                return this->page_table.SetMaxHeapSize(size);
+                return m_page_table.SetMaxHeapSize(size);
             }

             Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
-                return this->page_table.QueryInfo(out_info, out_page_info, addr);
+                return m_page_table.QueryInfo(out_info, out_page_info, addr);
             }

             Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
-                return this->page_table.QueryPhysicalAddress(out, address);
+                return m_page_table.QueryPhysicalAddress(out, address);
             }

             Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
-                return this->page_table.QueryStaticMapping(out, address, size);
+                return m_page_table.QueryStaticMapping(out, address, size);
             }

             Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
-                return this->page_table.QueryIoMapping(out, address, size);
+                return m_page_table.QueryIoMapping(out, address, size);
             }

             Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.MapMemory(dst_address, src_address, size);
+                return m_page_table.MapMemory(dst_address, src_address, size);
             }

             Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.UnmapMemory(dst_address, src_address, size);
+                return m_page_table.UnmapMemory(dst_address, src_address, size);
             }

             Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.MapCodeMemory(dst_address, src_address, size);
+                return m_page_table.MapCodeMemory(dst_address, src_address, size);
             }

             Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-                return this->page_table.UnmapCodeMemory(dst_address, src_address, size);
+                return m_page_table.UnmapCodeMemory(dst_address, src_address, size);
             }

             Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
-                return this->page_table.MapIo(phys_addr, size, perm);
+                return m_page_table.MapIo(phys_addr, size, perm);
             }

             Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
-                return this->page_table.MapStatic(phys_addr, size, perm);
+                return m_page_table.MapStatic(phys_addr, size, perm);
             }

             Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
-                return this->page_table.MapRegion(region_type, perm);
+                return m_page_table.MapRegion(region_type, perm);
             }

             Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPageGroup(addr, pg, state, perm);
+                return m_page_table.MapPageGroup(addr, pg, state, perm);
             }

             Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
-                return this->page_table.UnmapPageGroup(address, pg, state);
+                return m_page_table.UnmapPageGroup(address, pg, state);
             }

             Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
+                return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
             }

             Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPages(out_addr, num_pages, state, perm);
+                return m_page_table.MapPages(out_addr, num_pages, state, perm);
             }

             Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
-                return this->page_table.MapPages(address, num_pages, state, perm);
+                return m_page_table.MapPages(address, num_pages, state, perm);
             }

             Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
-                return this->page_table.UnmapPages(addr, num_pages, state);
+                return m_page_table.UnmapPages(addr, num_pages, state);
             }

             Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
-                return this->page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
+                return m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
             }

             Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
-                return this->page_table.InvalidateProcessDataCache(address, size);
+                return m_page_table.InvalidateProcessDataCache(address, size);
             }

             Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
-                return this->page_table.ReadDebugMemory(buffer, address, size);
+                return m_page_table.ReadDebugMemory(buffer, address, size);
             }

             Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
-                return this->page_table.WriteDebugMemory(address, buffer, size);
+                return m_page_table.WriteDebugMemory(address, buffer, size);
             }

             Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
-                return this->page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
+                return m_page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
             }

             Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
-                return this->page_table.UnlockForDeviceAddressSpace(address, size);
+                return m_page_table.UnlockForDeviceAddressSpace(address, size);
             }

             Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
-                return this->page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
+                return m_page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
             }

             Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
-                return this->page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
+                return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
             }

             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
-                return this->page_table.LockForIpcUserBuffer(out, address, size);
+                return m_page_table.LockForIpcUserBuffer(out, address, size);
             }

             Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
-                return this->page_table.UnlockForIpcUserBuffer(address, size);
+                return m_page_table.UnlockForIpcUserBuffer(address, size);
             }

             Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
-                return this->page_table.LockForTransferMemory(out, address, size, perm);
+                return m_page_table.LockForTransferMemory(out, address, size, perm);
             }

             Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
-                return this->page_table.UnlockForTransferMemory(address, size, pg);
+                return m_page_table.UnlockForTransferMemory(address, size, pg);
             }

             Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
-                return this->page_table.LockForCodeMemory(out, address, size);
+                return m_page_table.LockForCodeMemory(out, address, size);
             }

             Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
-                return this->page_table.UnlockForCodeMemory(address, size, pg);
+                return m_page_table.UnlockForCodeMemory(address, size, pg);
             }

             Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-                return this->page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+                return m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }

             Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-                return this->page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+                return m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }

             Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
-                return this->page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
+                return m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
             }

             Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
-                return this->page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
+                return m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
             }

             Result CopyMemoryFromHeapToHeap(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-                return this->page_table.CopyMemoryFromHeapToHeap(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+                return m_page_table.CopyMemoryFromHeapToHeap(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }

             Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-                return this->page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+                return m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }

             Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
-                return this->page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.page_table, test_perm, dst_state, send);
+                return m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, test_perm, dst_state, send);
             }

             Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process) {
-                return this->page_table.CleanupForIpcServer(address, size, dst_state, server_process);
+                return m_page_table.CleanupForIpcServer(address, size, dst_state, server_process);
             }

             Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
-                return this->page_table.CleanupForIpcClient(address, size, dst_state);
+                return m_page_table.CleanupForIpcClient(address, size, dst_state);
             }

             Result MapPhysicalMemory(KProcessAddress address, size_t size) {
-                return this->page_table.MapPhysicalMemory(address, size);
+                return m_page_table.MapPhysicalMemory(address, size);
             }

             Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
-                return this->page_table.UnmapPhysicalMemory(address, size);
+                return m_page_table.UnmapPhysicalMemory(address, size);
             }

             Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
-                return this->page_table.MapPhysicalMemoryUnsafe(address, size);
+                return m_page_table.MapPhysicalMemoryUnsafe(address, size);
             }

             Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
-                return this->page_table.UnmapPhysicalMemoryUnsafe(address, size);
+                return m_page_table.UnmapPhysicalMemoryUnsafe(address, size);
             }

             void DumpMemoryBlocks() const {
-                return this->page_table.DumpMemoryBlocks();
+                return m_page_table.DumpMemoryBlocks();
             }

             void DumpPageTable() const {
-                return this->page_table.DumpPageTable();
+                return m_page_table.DumpPageTable();
             }

             size_t CountPageTables() const {
-                return this->page_table.CountPageTables();
+                return m_page_table.CountPageTables();
             }

             bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
-                return this->page_table.GetPhysicalAddress(out, address);
+                return m_page_table.GetPhysicalAddress(out, address);
             }

-            bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); }
+            bool Contains(KProcessAddress addr, size_t size) const { return m_page_table.Contains(addr, size); }

-            bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInAliasRegion(addr, size); }
-            bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInUnsafeAliasRegion(addr, size); }
+            bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInAliasRegion(addr, size); }
+            bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInUnsafeAliasRegion(addr, size); }

-            bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); }
+            bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return m_page_table.CanContain(addr, size, state); }

-            KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); }
-            KProcessAddress GetHeapRegionStart() const { return this->page_table.GetHeapRegionStart(); }
-            KProcessAddress GetAliasRegionStart() const { return this->page_table.GetAliasRegionStart(); }
-            KProcessAddress GetStackRegionStart() const { return this->page_table.GetStackRegionStart(); }
-            KProcessAddress GetKernelMapRegionStart() const { return this->page_table.GetKernelMapRegionStart(); }
-            KProcessAddress GetAliasCodeRegionStart() const { return this->page_table.GetAliasCodeRegionStart(); }
+            KProcessAddress GetAddressSpaceStart() const { return m_page_table.GetAddressSpaceStart(); }
+            KProcessAddress GetHeapRegionStart() const { return m_page_table.GetHeapRegionStart(); }
+            KProcessAddress GetAliasRegionStart() const { return m_page_table.GetAliasRegionStart(); }
+            KProcessAddress GetStackRegionStart() const { return m_page_table.GetStackRegionStart(); }
+            KProcessAddress GetKernelMapRegionStart() const { return m_page_table.GetKernelMapRegionStart(); }
+            KProcessAddress GetAliasCodeRegionStart() const { return m_page_table.GetAliasCodeRegionStart(); }

-            size_t GetAddressSpaceSize() const { return this->page_table.GetAddressSpaceSize(); }
-            size_t GetHeapRegionSize() const { return this->page_table.GetHeapRegionSize(); }
-            size_t GetAliasRegionSize() const { return this->page_table.GetAliasRegionSize(); }
-            size_t GetStackRegionSize() const { return this->page_table.GetStackRegionSize(); }
-            size_t GetKernelMapRegionSize() const { return this->page_table.GetKernelMapRegionSize(); }
-            size_t GetAliasCodeRegionSize() const { return this->page_table.GetAliasCodeRegionSize(); }
+            size_t GetAddressSpaceSize() const { return m_page_table.GetAddressSpaceSize(); }
+            size_t GetHeapRegionSize() const { return m_page_table.GetHeapRegionSize(); }
+            size_t GetAliasRegionSize() const { return m_page_table.GetAliasRegionSize(); }
+            size_t GetStackRegionSize() const { return m_page_table.GetStackRegionSize(); }
+            size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
+            size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }

-            size_t GetNormalMemorySize() const { return this->page_table.GetNormalMemorySize(); }
+            size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }

-            size_t GetCodeSize() const { return this->page_table.GetCodeSize(); }
-            size_t GetCodeDataSize() const { return this->page_table.GetCodeDataSize(); }
+            size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }
+            size_t GetCodeDataSize() const { return m_page_table.GetCodeDataSize(); }

-            size_t GetAliasCodeSize() const { return this->page_table.GetAliasCodeSize(); }
-            size_t GetAliasCodeDataSize() const { return this->page_table.GetAliasCodeDataSize(); }
+            size_t GetAliasCodeSize() const { return m_page_table.GetAliasCodeSize(); }
+            size_t GetAliasCodeDataSize() const { return m_page_table.GetAliasCodeDataSize(); }

-            u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); }
+            u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }

             KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) const {
-                return this->page_table.GetHeapPhysicalAddress(address);
+                return m_page_table.GetHeapPhysicalAddress(address);
             }

             KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) const {
-                return this->page_table.GetHeapVirtualAddress(address);
+                return m_page_table.GetHeapVirtualAddress(address);
             }

             KBlockInfoManager *GetBlockInfoManager() {
-                return this->page_table.GetBlockInfoManager();
+                return m_page_table.GetBlockInfoManager();
             }
     };
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp
index c6bcc85ce..d973d89f1 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp
@@ -21,19 +21,19 @@ namespace ams::kern::arch::arm64 {
class KNotAlignedSpinLock { private: - u32 packed_tickets; + u32 m_packed_tickets; public: - constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ } + constexpr KNotAlignedSpinLock() : m_packed_tickets(0) { /* ... */ } ALWAYS_INLINE void Lock() { u32 tmp0, tmp1, tmp2; __asm__ __volatile__( - " prfm pstl1keep, %[packed_tickets]\n" + " prfm pstl1keep, %[m_packed_tickets]\n" "1:\n" - " ldaxr %w[tmp0], %[packed_tickets]\n" + " ldaxr %w[tmp0], %[m_packed_tickets]\n" " add %w[tmp2], %w[tmp0], #0x10000\n" - " stxr %w[tmp1], %w[tmp2], %[packed_tickets]\n" + " stxr %w[tmp1], %w[tmp2], %[m_packed_tickets]\n" " cbnz %w[tmp1], 1b\n" " \n" " and %w[tmp1], %w[tmp0], #0xFFFF\n" @@ -42,21 +42,21 @@ namespace ams::kern::arch::arm64 { " sevl\n" "2:\n" " wfe\n" - " ldaxrh %w[tmp1], %[packed_tickets]\n" + " ldaxrh %w[tmp1], %[m_packed_tickets]\n" " cmp %w[tmp1], %w[tmp0], lsr #16\n" " b.ne 2b\n" "3:\n" - : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [packed_tickets]"+Q"(this->packed_tickets) + : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [m_packed_tickets]"+Q"(m_packed_tickets) : : "cc", "memory" ); } ALWAYS_INLINE void Unlock() { - const u32 value = this->packed_tickets + 1; + const u32 value = m_packed_tickets + 1; __asm__ __volatile__( - " stlrh %w[value], %[packed_tickets]\n" - : [packed_tickets]"+Q"(this->packed_tickets) + " stlrh %w[value], %[m_packed_tickets]\n" + : [m_packed_tickets]"+Q"(m_packed_tickets) : [value]"r"(value) : "memory" ); @@ -66,39 +66,39 @@ namespace ams::kern::arch::arm64 { class KAlignedSpinLock { private: - alignas(cpu::DataCacheLineSize) u16 current_ticket; - alignas(cpu::DataCacheLineSize) u16 next_ticket; + alignas(cpu::DataCacheLineSize) u16 m_current_ticket; + alignas(cpu::DataCacheLineSize) u16 m_next_ticket; public: - constexpr KAlignedSpinLock() : current_ticket(0), next_ticket(0) { /* ... */ } + constexpr KAlignedSpinLock() : m_current_ticket(0), m_next_ticket(0) { /* ... 
*/ } ALWAYS_INLINE void Lock() { u32 tmp0, tmp1, got_lock; __asm__ __volatile__( - " prfm pstl1keep, %[next_ticket]\n" + " prfm pstl1keep, %[m_next_ticket]\n" "1:\n" - " ldaxrh %w[tmp0], %[next_ticket]\n" + " ldaxrh %w[tmp0], %[m_next_ticket]\n" " add %w[tmp1], %w[tmp0], #0x1\n" - " stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n" + " stxrh %w[got_lock], %w[tmp1], %[m_next_ticket]\n" " cbnz %w[got_lock], 1b\n" " \n" " sevl\n" "2:\n" " wfe\n" - " ldaxrh %w[tmp1], %[current_ticket]\n" + " ldaxrh %w[tmp1], %[m_current_ticket]\n" " cmp %w[tmp1], %w[tmp0]\n" " b.ne 2b\n" - : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket) - : [current_ticket]"Q"(this->current_ticket) + : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [m_next_ticket]"+Q"(m_next_ticket) + : [m_current_ticket]"Q"(m_current_ticket) : "cc", "memory" ); } ALWAYS_INLINE void Unlock() { - const u32 value = this->current_ticket + 1; + const u32 value = m_current_ticket + 1; __asm__ __volatile__( - " stlrh %w[value], %[current_ticket]\n" - : [current_ticket]"+Q"(this->current_ticket) + " stlrh %w[value], %[m_current_ticket]\n" + : [m_current_ticket]"+Q"(m_current_ticket) : [value]"r"(value) : "memory" ); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp index 5cd8b0d90..8d74be2bc 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp @@ -22,16 +22,16 @@ namespace ams::kern::arch::arm64 { class KSupervisorPageTable { private: - KPageTable page_table; - u64 ttbr0_identity[cpu::NumCores]; + KPageTable m_page_table; + u64 m_ttbr0_identity[cpu::NumCores]; public: - constexpr KSupervisorPageTable() : page_table(), ttbr0_identity() { /* ... */ } + constexpr KSupervisorPageTable() : m_page_table(), m_ttbr0_identity() { /* ... 
*/ } NOINLINE void Initialize(s32 core_id); void Activate() { /* Activate, using process id = 0xFFFFFFFF */ - this->page_table.Activate(0xFFFFFFFF); + m_page_table.Activate(0xFFFFFFFF); } void ActivateForInit() { @@ -42,37 +42,37 @@ namespace ams::kern::arch::arm64 { } Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { - return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm); + return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm); } Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { - return this->page_table.UnmapPages(address, num_pages, state); + return m_page_table.UnmapPages(address, num_pages, state); } Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { - return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm); + return m_page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm); } Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) { - return this->page_table.UnmapPageGroup(address, pg, state); + return m_page_table.UnmapPageGroup(address, pg, state); } bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { - return this->page_table.GetPhysicalAddress(out, address); + return m_page_table.GetPhysicalAddress(out, address); } - constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return this->ttbr0_identity[core_id]; } + constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return m_ttbr0_identity[core_id]; } void DumpMemoryBlocks() const { - return this->page_table.DumpMemoryBlocks(); + return m_page_table.DumpMemoryBlocks(); } void DumpPageTable() const { - return this->page_table.DumpPageTable(); + return m_page_table.DumpPageTable(); } size_t CountPageTables() const { - return this->page_table.CountPageTables(); + return m_page_table.CountPageTables(); } }; diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp index 6a123eee5..d2de2c88f 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp @@ -45,19 +45,19 @@ namespace ams::kern::arch::arm64 { u64 x28; u64 x29; }; - } callee_saved; - u64 lr; - u64 sp; - u64 cpacr; - u64 fpcr; - u64 fpsr; - alignas(0x10) u128 fpu_registers[NumFpuRegisters]; - bool locked; + } m_callee_saved; + u64 m_lr; + u64 m_sp; + u64 m_cpacr; + u64 m_fpcr; + u64 m_fpsr; + alignas(0x10) u128 m_fpu_registers[NumFpuRegisters]; + bool m_locked; private: static void RestoreFpuRegisters64(const KThreadContext &); static void RestoreFpuRegisters32(const KThreadContext &); public: - constexpr explicit KThreadContext() : callee_saved(), lr(), sp(), cpacr(), fpcr(), fpsr(), fpu_registers(), locked() { /* ... */ } + constexpr explicit KThreadContext() : m_callee_saved(), m_lr(), m_sp(), m_cpacr(), m_fpcr(), m_fpsr(), m_fpu_registers(), m_locked() { /* ... 
*/ } Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main); Result Finalize(); @@ -66,17 +66,17 @@ namespace ams::kern::arch::arm64 { static void FpuContextSwitchHandler(KThread *thread); - u32 GetFpcr() const { return this->fpcr; } - u32 GetFpsr() const { return this->fpsr; } + u32 GetFpcr() const { return m_fpcr; } + u32 GetFpsr() const { return m_fpsr; } - void SetFpcr(u32 v) { this->fpcr = v; } - void SetFpsr(u32 v) { this->fpsr = v; } + void SetFpcr(u32 v) { m_fpcr = v; } + void SetFpsr(u32 v) { m_fpsr = v; } void CloneFpuStatus(); void SetFpuRegisters(const u128 *v, bool is_64_bit); - const u128 *GetFpuRegisters() const { return this->fpu_registers; } + const u128 *GetFpuRegisters() const { return m_fpu_registers; } public: static void OnThreadTerminating(const KThread *thread); }; diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp index c86cf1778..21814b198 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp @@ -27,13 +27,13 @@ namespace ams::kern::board::nintendo::nx { private: static constexpr size_t TableCount = 4; private: - KVirtualAddress tables[TableCount]; - u8 table_asids[TableCount]; - u64 attached_device; - u32 attached_value; - u32 detached_value; - u32 hs_attached_value; - u32 hs_detached_value; + KVirtualAddress m_tables[TableCount]; + u8 m_table_asids[TableCount]; + u64 m_attached_device; + u32 m_attached_value; + u32 m_detached_value; + u32 m_hs_attached_value; + u32 m_hs_detached_value; private: static ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress addr) { const KMemoryRegion *hint = nullptr; @@ -61,7 +61,7 @@ namespace ams::kern::board::nintendo::nx { return KPageTable::GetPageTablePhysicalAddress(addr); } public: - constexpr KDevicePageTable() : tables(), table_asids(), attached_device(), attached_value(), detached_value(), hs_attached_value(), hs_detached_value() { /* ... */ } + constexpr KDevicePageTable() : m_tables(), m_table_asids(), m_attached_device(), m_attached_value(), m_detached_value(), m_hs_attached_value(), m_hs_detached_value() { /* ... */ } Result Initialize(u64 space_address, u64 space_size); void Finalize(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp index 14f23b4e4..1157e62ba 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_address_arbiter.hpp @@ -23,9 +23,9 @@ namespace ams::kern { public: using ThreadTree = KConditionVariable::ThreadTree; private: - ThreadTree tree; + ThreadTree m_tree; public: - constexpr KAddressArbiter() : tree() { /* ... */ } + constexpr KAddressArbiter() : m_tree() { /* ... 
*/ } Result SignalToAddress(uintptr_t addr, ams::svc::SignalType type, s32 value, s32 count) { switch (type) { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp index afe704cfa..51fcbaf02 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp @@ -32,20 +32,20 @@ namespace ams::kern { Type_Count, }; private: - size_t bit_width; - size_t address; - size_t size; - Type type; + size_t m_bit_width; + size_t m_address; + size_t m_size; + Type m_type; public: static uintptr_t GetAddressSpaceStart(size_t width, Type type); static size_t GetAddressSpaceSize(size_t width, Type type); - constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : bit_width(bw), address(a), size(s), type(t) { /* ... */ } + constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : m_bit_width(bw), m_address(a), m_size(s), m_type(t) { /* ... */ } - constexpr size_t GetWidth() const { return this->bit_width; } - constexpr size_t GetAddress() const { return this->address; } - constexpr size_t GetSize() const { return this->size; } - constexpr Type GetType() const { return this->type; } + constexpr size_t GetWidth() const { return m_bit_width; } + constexpr size_t GetAddress() const { return m_address; } + constexpr size_t GetSize() const { return m_size; } + constexpr Type GetType() const { return m_type; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp index a3a263fba..5ddf32459 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp @@ -23,38 +23,38 @@ namespace ams::kern { private: static constexpr u64 AllowedAffinityMask = (1ul << cpu::NumCores) - 1; private: - u64 mask; + u64 m_mask; private: static constexpr ALWAYS_INLINE u64 GetCoreBit(s32 core) { MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores)); return (1ul << core); } public: - constexpr ALWAYS_INLINE KAffinityMask() : mask(0) { MESOSPHERE_ASSERT_THIS(); } + constexpr ALWAYS_INLINE KAffinityMask() : m_mask(0) { MESOSPHERE_ASSERT_THIS(); } - constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return this->mask; } + constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return m_mask; } constexpr ALWAYS_INLINE void SetAffinityMask(u64 new_mask) { MESOSPHERE_ASSERT((new_mask & ~AllowedAffinityMask) == 0); - this->mask = new_mask; + m_mask = new_mask; } constexpr ALWAYS_INLINE bool GetAffinity(s32 core) const { - return this->mask & GetCoreBit(core); + return m_mask & GetCoreBit(core); } constexpr ALWAYS_INLINE void SetAffinity(s32 core, bool set) { MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores)); if (set) { - this->mask |= GetCoreBit(core); + m_mask |= GetCoreBit(core); } else { - this->mask &= ~GetCoreBit(core); + m_mask &= ~GetCoreBit(core); } } constexpr ALWAYS_INLINE void SetAll() { - this->mask = AllowedAffinityMask; + m_mask = AllowedAffinityMask; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp index 38cf66e06..cdd385046 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp @@ -46,13
+46,13 @@ namespace ams::kern { protected: class TypeObj { private: - const char *name; - ClassTokenType class_token; + const char *m_name; + ClassTokenType m_class_token; public: - constexpr explicit TypeObj(const char *n, ClassTokenType tok) : name(n), class_token(tok) { /* ... */ } + constexpr explicit TypeObj(const char *n, ClassTokenType tok) : m_name(n), m_class_token(tok) { /* ... */ } - constexpr ALWAYS_INLINE const char *GetName() const { return this->name; } - constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return this->class_token; } + constexpr ALWAYS_INLINE const char *GetName() const { return m_name; } + constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return m_class_token; } constexpr ALWAYS_INLINE bool operator==(const TypeObj &rhs) { return this->GetClassToken() == rhs.GetClassToken(); @@ -69,11 +69,11 @@ namespace ams::kern { private: MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject); private: - std::atomic<u32> ref_count; + std::atomic<u32> m_ref_count; public: static KAutoObject *Create(KAutoObject *ptr); public: - constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { MESOSPHERE_ASSERT_THIS(); } + constexpr ALWAYS_INLINE explicit KAutoObject() : m_ref_count(0) { MESOSPHERE_ASSERT_THIS(); } virtual ~KAutoObject() { MESOSPHERE_ASSERT_THIS(); } /* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. */ @@ -85,7 +85,7 @@ namespace ams::kern { virtual KProcess *GetOwner() const { return nullptr; } u32 GetReferenceCount() const { - return this->ref_count; + return m_ref_count; } ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const { @@ -124,14 +124,14 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Atomically increment the reference count, only if it's positive. */ - u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); + u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire); do { if (AMS_UNLIKELY(cur_ref_count == 0)) { MESOSPHERE_AUDIT(cur_ref_count != 0); return false; } MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1); - } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed)); + } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed)); return true; } @@ -140,10 +140,10 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Atomically decrement the reference count, not allowing it to become negative. */ - u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); + u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire); do { MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0); - } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed)); + } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed)); /* If ref count hits zero, destroy the object. */ if (cur_ref_count - 1 == 0) {
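A note on the Open()/Close() hunks above: the count is maintained with a load plus compare_exchange_weak loop so that an increment can never resurrect an object whose count has already hit zero, and a decrement can never run the count negative. The increment in isolation, as a standalone sketch (the kernel's version additionally audits and aborts on overflow):

#include <atomic>
#include <cstdint>

bool TryOpen(std::atomic<std::uint32_t> &ref_count) {
    std::uint32_t cur = ref_count.load(std::memory_order_acquire);
    do {
        if (cur == 0) {
            return false; // Already dead: refuse to revive the object.
        }
    } while (!ref_count.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
    return true; // cur was observed positive and incremented atomically.
}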
@@ -185,44 +185,44 @@ namespace ams::kern { template<typename> friend class KScopedAutoObject; private: - T *obj; + T *m_obj; private: constexpr ALWAYS_INLINE void Swap(KScopedAutoObject &rhs) { - std::swap(this->obj, rhs.obj); + std::swap(m_obj, rhs.m_obj); } public: - constexpr ALWAYS_INLINE KScopedAutoObject() : obj(nullptr) { /* ... */ } - constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) { - if (this->obj != nullptr) { - this->obj->Open(); + constexpr ALWAYS_INLINE KScopedAutoObject() : m_obj(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : m_obj(o) { + if (m_obj != nullptr) { + m_obj->Open(); } } ~KScopedAutoObject() { - if (this->obj != nullptr) { - this->obj->Close(); + if (m_obj != nullptr) { + m_obj->Close(); } - this->obj = nullptr; + m_obj = nullptr; } template<typename U> requires (std::derived_from<T, U> || std::derived_from<U, T>) constexpr KScopedAutoObject(KScopedAutoObject<U> &&rhs) { if constexpr (std::derived_from<U, T>) { /* Upcast. */ - this->obj = rhs.obj; - rhs.obj = nullptr; + m_obj = rhs.m_obj; + rhs.m_obj = nullptr; } else { /* Downcast. */ T *derived = nullptr; - if (rhs.obj != nullptr) { - derived = rhs.obj->template DynamicCast<T *>(); + if (rhs.m_obj != nullptr) { + derived = rhs.m_obj->template DynamicCast<T *>(); if (derived == nullptr) { - rhs.obj->Close(); + rhs.m_obj->Close(); } } - this->obj = derived; - rhs.obj = nullptr; + m_obj = derived; + rhs.m_obj = nullptr; } } @@ -231,19 +231,19 @@ namespace ams::kern { return *this; } - constexpr ALWAYS_INLINE T *operator->() { return this->obj; } - constexpr ALWAYS_INLINE T &operator*() { return *this->obj; } + constexpr ALWAYS_INLINE T *operator->() { return m_obj; } + constexpr ALWAYS_INLINE T &operator*() { return *m_obj; } constexpr ALWAYS_INLINE void Reset(T *o) { KScopedAutoObject(o).Swap(*this); } - constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return this->obj; } + constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return m_obj; } - constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = this->obj; this->obj = nullptr; return ret; } + constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = m_obj; m_obj = nullptr; return ret; } - constexpr ALWAYS_INLINE bool IsNull() const { return this->obj == nullptr; } - constexpr ALWAYS_INLINE bool IsNotNull() const { return this->obj != nullptr; } + constexpr ALWAYS_INLINE bool IsNull() const { return m_obj == nullptr; } + constexpr ALWAYS_INLINE bool IsNotNull() const { return m_obj != nullptr; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp index 60ac2fef5..958426b67 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp @@ -28,30 +28,30 @@ namespace ams::kern { public: class ListAccessor : public KScopedLightLock { private: - ListType &list; + ListType &m_list; public: - explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->lock), list(container->object_list) { /* ... */ } - explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.lock), list(container.object_list) { /* ... */ } + explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->m_lock), m_list(container->m_object_list) { /* ... */ } + explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.m_lock), m_list(container.m_object_list) { /* ...
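The converting move constructor above is the subtle part of KScopedAutoObject: moving between instantiations upcasts for free, but downcasts go through DynamicCast and Close() the reference when the cast fails, so no count is leaked. A reduced, standalone model of that control flow (std::derived_from and dynamic_cast stand in for the kernel's class-token machinery):

#include <concepts>

struct Base { virtual ~Base() = default; };
struct Derived : Base { };

template<typename T>
struct Scoped {
    T *m_obj = nullptr;

    Scoped() = default;
    explicit Scoped(T *o) : m_obj(o) { }

    template<typename U> requires (std::derived_from<T, U> || std::derived_from<U, T>)
    Scoped(Scoped<U> &&rhs) {
        if constexpr (std::derived_from<U, T>) {
            m_obj = rhs.m_obj;                    // Upcast: implicit pointer conversion.
        } else {
            m_obj = dynamic_cast<T *>(rhs.m_obj); // Downcast: nullptr if the cast fails.
        }
        rhs.m_obj = nullptr;
    }
};

In the kernel, the failed-downcast branch also drops the moved-from reference, which is why the original calls rhs.m_obj->Close() before nulling it.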
*/ } typename ListType::iterator begin() const { - return this->list.begin(); + return m_list.begin(); } typename ListType::iterator end() const { - return this->list.end(); + return m_list.end(); } typename ListType::iterator find(typename ListType::const_reference ref) const { - return this->list.find(ref); + return m_list.find(ref); } }; friend class ListAccessor; private: - KLightLock lock; - ListType object_list; + KLightLock m_lock; + ListType m_object_list; public: - constexpr KAutoObjectWithListContainer() : lock(), object_list() { MESOSPHERE_ASSERT_THIS(); } + constexpr KAutoObjectWithListContainer() : m_lock(), m_object_list() { MESOSPHERE_ASSERT_THIS(); } void Initialize() { MESOSPHERE_ASSERT_THIS(); } void Finalize() { MESOSPHERE_ASSERT_THIS(); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_beta.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_beta.hpp index f5fec3e88..0acf0a4ed 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_beta.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_beta.hpp @@ -29,10 +29,10 @@ namespace ams::kern { private: /* NOTE: Official KBeta has size 0x88, corresponding to 0x58 bytes of fields. */ /* TODO: Add these fields, if KBeta is ever instantiable in the NX kernel. */ - util::IntrusiveListNode process_list_node; + util::IntrusiveListNode m_process_list_node; public: explicit KBeta() - : process_list_node() + : m_process_list_node() { /* ... */ } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp index 7c9863049..441514bac 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp @@ -200,14 +200,14 @@ namespace ams::kern { CapabilityFlag | CapabilityFlag; private: - u8 svc_access_flags[SvcFlagCount]{}; - u8 irq_access_flags[IrqFlagCount]{}; - u64 core_mask{}; - u64 priority_mask{}; - util::BitPack32 debug_capabilities{0}; - s32 handle_table_size{}; - util::BitPack32 intended_kernel_version{0}; - u32 program_type{}; + u8 m_svc_access_flags[SvcFlagCount]{}; + u8 m_irq_access_flags[IrqFlagCount]{}; + u64 m_core_mask{}; + u64 m_priority_mask{}; + util::BitPack32 m_debug_capabilities{0}; + s32 m_handle_table_size{}; + util::BitPack32 m_intended_kernel_version{0}; + u32 m_program_type{}; private: static constexpr ALWAYS_INLINE void SetSvcAllowedImpl(u8 *data, u32 id) { constexpr size_t BitsPerWord = BITSIZEOF(*data); @@ -228,8 +228,8 @@ namespace ams::kern { } bool SetSvcAllowed(u32 id) { - if (id < BITSIZEOF(this->svc_access_flags)) { - SetSvcAllowedImpl(this->svc_access_flags, id); + if (id < BITSIZEOF(m_svc_access_flags)) { + SetSvcAllowedImpl(m_svc_access_flags, id); return true; } else { return false; @@ -237,9 +237,9 @@ namespace ams::kern { } bool SetInterruptPermitted(u32 id) { - constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]); - if (id < BITSIZEOF(this->irq_access_flags)) { - this->irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord)); + constexpr size_t BitsPerWord = BITSIZEOF(m_irq_access_flags[0]); + if (id < BITSIZEOF(m_irq_access_flags)) { + m_irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord)); return true; } else { return false; @@ -266,14 +266,14 @@ namespace ams::kern { Result Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table); Result Initialize(svc::KUserPointer user_caps, s32 num_caps, KProcessPageTable *page_table); - constexpr u64 
GetCoreMask() const { return this->core_mask; } - constexpr u64 GetPriorityMask() const { return this->priority_mask; } - constexpr s32 GetHandleTableSize() const { return this->handle_table_size; } + constexpr u64 GetCoreMask() const { return m_core_mask; } + constexpr u64 GetPriorityMask() const { return m_priority_mask; } + constexpr s32 GetHandleTableSize() const { return m_handle_table_size; } ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const { - static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission)); /* Copy permissions. */ - std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags)); + std::memcpy(sp.svc_permission, m_svc_access_flags, sizeof(m_svc_access_flags)); /* Clear specific SVCs based on our state. */ ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); @@ -284,9 +284,9 @@ namespace ams::kern { } ALWAYS_INLINE void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) const { - static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission)); /* Clear all permissions. */ - std::memset(sp.svc_permission, 0, sizeof(this->svc_access_flags)); + std::memset(sp.svc_permission, 0, sizeof(m_svc_access_flags)); /* Set specific SVCs based on our state. */ SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState); @@ -297,12 +297,12 @@ namespace ams::kern { } ALWAYS_INLINE void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) const { - static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission)); /* Get whether we have access to return from exception. */ const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); /* Copy permissions. */ - std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags)); + std::memcpy(sp.svc_permission, m_svc_access_flags, sizeof(m_svc_access_flags)); /* Clear/Set specific SVCs based on our state. */ ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); @@ -313,21 +313,21 @@ namespace ams::kern { } ALWAYS_INLINE void CopyEnterExceptionSvcPermissionsTo(KThread::StackParameters &sp) { - static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission)); /* Set ReturnFromException if allowed. */ - if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_ReturnFromException)) { + if (GetSvcAllowedImpl(m_svc_access_flags, svc::SvcId_ReturnFromException)) { SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); } /* Set GetInfo if allowed. */ - if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_GetInfo)) { + if (GetSvcAllowedImpl(m_svc_access_flags, svc::SvcId_GetInfo)) { SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo); } } ALWAYS_INLINE void CopyLeaveExceptionSvcPermissionsTo(KThread::StackParameters &sp) { - static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission)); /* Clear ReturnFromException. 
*/ ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); @@ -339,24 +339,24 @@ namespace ams::kern { } constexpr bool IsPermittedInterrupt(u32 id) const { - constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]); - if (id < BITSIZEOF(this->irq_access_flags)) { - return (this->irq_access_flags[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0; + constexpr size_t BitsPerWord = BITSIZEOF(m_irq_access_flags[0]); + if (id < BITSIZEOF(m_irq_access_flags)) { + return (m_irq_access_flags[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0; } else { return false; } } constexpr bool IsPermittedDebug() const { - return this->debug_capabilities.Get<DebugFlags::AllowDebug>(); + return m_debug_capabilities.Get<DebugFlags::AllowDebug>(); } constexpr bool CanForceDebug() const { - return this->debug_capabilities.Get<DebugFlags::ForceDebug>(); + return m_debug_capabilities.Get<DebugFlags::ForceDebug>(); } - constexpr u32 GetIntendedKernelMajorVersion() const { return this->intended_kernel_version.Get<KernelVersion::MajorVersion>(); } - constexpr u32 GetIntendedKernelMinorVersion() const { return this->intended_kernel_version.Get<KernelVersion::MinorVersion>(); } + constexpr u32 GetIntendedKernelMajorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MajorVersion>(); } + constexpr u32 GetIntendedKernelMinorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MinorVersion>(); } constexpr u32 GetIntendedKernelVersion() const { return ams::svc::EncodeKernelVersion(this->GetIntendedKernelMajorVersion(), this->GetIntendedKernelMinorVersion()); } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp index fd11a4795..d2462d0fc 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp @@ -28,23 +28,23 @@ namespace ams::kern { class KClientPort final : public KSynchronizationObject { MESOSPHERE_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject); private: - std::atomic<s32> num_sessions; - std::atomic<s32> peak_sessions; - s32 max_sessions; - KPort *parent; + std::atomic<s32> m_num_sessions; + std::atomic<s32> m_peak_sessions; + s32 m_max_sessions; + KPort *m_parent; public: - constexpr KClientPort() : num_sessions(), peak_sessions(), max_sessions(), parent() { /* ... */ } + constexpr KClientPort() : m_num_sessions(), m_peak_sessions(), m_max_sessions(), m_parent() { /* ... */ } virtual ~KClientPort() { /* ... */ }
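Returning to the KCapabilities helpers above (SetSvcAllowedImpl, IsPermittedInterrupt, and friends): they all share one pattern, a flat bitmap over u8 words indexed by SVC or IRQ id. The pattern in isolation, with BITSIZEOF(u8) spelled out as 8:

#include <cstddef>
#include <cstdint>

constexpr std::size_t BitsPerWord = 8; // BITSIZEOF(u8)

constexpr void SetAllowed(std::uint8_t *data, std::uint32_t id) {
    data[id / BitsPerWord] |= (1u << (id % BitsPerWord));
}

constexpr void ClearAllowed(std::uint8_t *data, std::uint32_t id) {
    data[id / BitsPerWord] &= static_cast<std::uint8_t>(~(1u << (id % BitsPerWord)));
}

constexpr bool GetAllowed(const std::uint8_t *data, std::uint32_t id) {
    return (data[id / BitsPerWord] & (1u << (id % BitsPerWord))) != 0;
}

The pinned/unpinned/exception copies above differ only in which ids they force-set or force-clear after copying or clearing the whole bitmap.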
void Initialize(KPort *parent, s32 max_sessions); void OnSessionFinalized(); void OnServerClosed(); - constexpr const KPort *GetParent() const { return this->parent; } + constexpr const KPort *GetParent() const { return m_parent; } - ALWAYS_INLINE s32 GetNumSessions() const { return this->num_sessions; } - ALWAYS_INLINE s32 GetPeakSessions() const { return this->peak_sessions; } - ALWAYS_INLINE s32 GetMaxSessions() const { return this->max_sessions; } + ALWAYS_INLINE s32 GetNumSessions() const { return m_num_sessions; } + ALWAYS_INLINE s32 GetPeakSessions() const { return m_peak_sessions; } + ALWAYS_INLINE s32 GetMaxSessions() const { return m_max_sessions; } bool IsLight() const; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp index 2725100fe..3e6de1822 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp @@ -24,20 +24,20 @@ namespace ams::kern { class KClientSession final : public KAutoObjectWithSlabHeapAndContainer<KClientSession, KAutoObjectWithList> { MESOSPHERE_AUTOOBJECT_TRAITS(KClientSession, KAutoObject); private: - KSession *parent; + KSession *m_parent; public: - constexpr KClientSession() : parent() { /* ... */ } + constexpr KClientSession() : m_parent() { /* ... */ } virtual ~KClientSession() { /* ... */ } void Initialize(KSession *parent) { /* Set member variables. */ - this->parent = parent; + m_parent = parent; } virtual void Destroy() override; static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ } - constexpr KSession *GetParent() const { return this->parent; } + constexpr KSession *GetParent() const { return m_parent; } Result SendSyncRequest(uintptr_t address, size_t size); Result SendAsyncRequest(KWritableEvent *event, uintptr_t address, size_t size); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp index 8fcbe4a17..6f94c8f2f 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp @@ -23,15 +23,15 @@ namespace ams::kern { class KCodeMemory final : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> { MESOSPHERE_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject); private: - TYPED_STORAGE(KPageGroup) page_group; - KProcess *owner; - KProcessAddress address; - KLightLock lock; - bool is_initialized; - bool is_owner_mapped; - bool is_mapped; + TYPED_STORAGE(KPageGroup) m_page_group; + KProcess *m_owner; + KProcessAddress m_address; + KLightLock m_lock; + bool m_is_initialized; + bool m_is_owner_mapped; + bool m_is_mapped; public: - explicit KCodeMemory() : owner(nullptr), address(Null<KProcessAddress>), is_initialized(false), is_owner_mapped(false), is_mapped(false) { + explicit KCodeMemory() : m_owner(nullptr), m_address(Null<KProcessAddress>), m_is_initialized(false), m_is_owner_mapped(false), m_is_mapped(false) { /* ... */ } @@ -45,12 +45,12 @@ namespace ams::kern { Result MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm); Result UnmapFromOwner(KProcessAddress address, size_t size); - virtual bool IsInitialized() const override { return this->is_initialized; } + virtual bool IsInitialized() const override { return m_is_initialized; } static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ...
*/ } - KProcess *GetOwner() const { return this->owner; } - KProcessAddress GetSourceAddress() { return this->address; } - size_t GetSize() const { return this->is_initialized ? GetReference(this->page_group).GetNumPages() * PageSize : 0; } + KProcess *GetOwner() const { return m_owner; } + KProcessAddress GetSourceAddress() { return m_address; } + size_t GetSize() const { return m_is_initialized ? GetReference(m_page_group).GetNumPages() * PageSize : 0; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp index 3c8928157..d967ea430 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp @@ -24,9 +24,9 @@ namespace ams::kern { public: using ThreadTree = typename KThread::ConditionVariableThreadTreeType; private: - ThreadTree tree; + ThreadTree m_tree; public: - constexpr KConditionVariable() : tree() { /* ... */ } + constexpr KConditionVariable() : m_tree() { /* ... */ } /* Arbitration. */ Result SignalToAddress(KProcessAddress addr); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp index dc3951236..e93f1437e 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp @@ -26,11 +26,11 @@ namespace ams::kern { protected: using DebugEventList = util::IntrusiveListBaseTraits<KEventInfo>::ListType; private: - DebugEventList event_info_list; - u32 continue_flags; - KProcess *process; - KLightLock lock; - KProcess::State old_process_state; + DebugEventList m_event_info_list; + u32 m_continue_flags; + KProcess *m_process; + KLightLock m_lock; + KProcess::State m_old_process_state; public: explicit KDebugBase() { /* ... */ } virtual ~KDebugBase() { /* ... */ } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp index 42e72a71e..82ed15c90 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp @@ -24,19 +24,19 @@ namespace ams::kern { class KDeviceAddressSpace final : public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> { MESOSPHERE_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject); private: - KLightLock lock; - KDevicePageTable table; - u64 space_address; - u64 space_size; - bool is_initialized; + KLightLock m_lock; + KDevicePageTable m_table; + u64 m_space_address; + u64 m_space_size; + bool m_is_initialized; public: - constexpr KDeviceAddressSpace() : lock(), table(), space_address(), space_size(), is_initialized() { /* ... */ } + constexpr KDeviceAddressSpace() : m_lock(), m_table(), m_space_address(), m_space_size(), m_is_initialized() { /* ... */ } virtual ~KDeviceAddressSpace() { /* ... */ } Result Initialize(u64 address, u64 size); virtual void Finalize() override; - virtual bool IsInitialized() const override { return this->is_initialized; } + virtual bool IsInitialized() const override { return m_is_initialized; } static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ...
*/ } Result Attach(ams::svc::DeviceName device_name); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp index 0943e32ab..64bd4ca98 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp @@ -28,19 +28,19 @@ namespace ams::kern { public: class PageBuffer { private: - u8 buffer[PageSize]; + u8 m_buffer[PageSize]; }; static_assert(sizeof(PageBuffer) == PageSize); private: - KSpinLock lock; - KPageBitmap page_bitmap; - size_t used; - size_t peak; - size_t count; - KVirtualAddress address; - size_t size; + KSpinLock m_lock; + KPageBitmap m_page_bitmap; + size_t m_used; + size_t m_peak; + size_t m_count; + KVirtualAddress m_address; + size_t m_size; public: - KDynamicPageManager() : lock(), page_bitmap(), used(), peak(), count(), address(), size() { /* ... */ } + KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(), m_size() { /* ... */ } Result Initialize(KVirtualAddress memory, size_t sz) { /* We need to have positive size. */ @@ -51,40 +51,40 @@ namespace ams::kern { const size_t allocatable_size = sz - management_size; /* Set tracking fields. */ - this->address = memory; - this->size = util::AlignDown(allocatable_size, sizeof(PageBuffer)); - this->count = allocatable_size / sizeof(PageBuffer); - R_UNLESS(this->count > 0, svc::ResultOutOfMemory()); + m_address = memory; + m_size = util::AlignDown(allocatable_size, sizeof(PageBuffer)); + m_count = allocatable_size / sizeof(PageBuffer); + R_UNLESS(m_count > 0, svc::ResultOutOfMemory()); /* Clear the management region. */ - u64 *management_ptr = GetPointer<u64>(this->address + allocatable_size); + u64 *management_ptr = GetPointer<u64>(m_address + allocatable_size); std::memset(management_ptr, 0, management_size); /* Initialize the bitmap. */ - this->page_bitmap.Initialize(management_ptr, this->count); + m_page_bitmap.Initialize(management_ptr, m_count); /* Free the pages to the bitmap. */ - std::memset(GetPointer<PageBuffer>(this->address), 0, this->count * sizeof(PageBuffer)); - for (size_t i = 0; i < this->count; i++) { - this->page_bitmap.SetBit(i); + std::memset(GetPointer<PageBuffer>(m_address), 0, m_count * sizeof(PageBuffer)); + for (size_t i = 0; i < m_count; i++) { + m_page_bitmap.SetBit(i); } return ResultSuccess(); } - constexpr KVirtualAddress GetAddress() const { return this->address; } - constexpr size_t GetSize() const { return this->size; } - constexpr size_t GetUsed() const { return this->used; } - constexpr size_t GetPeak() const { return this->peak; } - constexpr size_t GetCount() const { return this->count; } + constexpr KVirtualAddress GetAddress() const { return m_address; } + constexpr size_t GetSize() const { return m_size; } + constexpr size_t GetUsed() const { return m_used; } + constexpr size_t GetPeak() const { return m_peak; } + constexpr size_t GetCount() const { return m_count; } PageBuffer *Allocate() { /* Take the lock. */ KScopedInterruptDisable di; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); /* Find a random free block. */ - ssize_t soffset = this->page_bitmap.FindFreeBlock(true); + ssize_t soffset = m_page_bitmap.FindFreeBlock(true); if (AMS_UNLIKELY(soffset < 0)) { return nullptr; } @@ -92,23 +92,23 @@ namespace ams::kern { const size_t offset = static_cast<size_t>(soffset); /* Update our tracking. */ - this->page_bitmap.ClearBit(offset); - this->peak = std::max(this->peak, (++this->used)); + m_page_bitmap.ClearBit(offset); + m_peak = std::max(m_peak, (++m_used)); - return GetPointer<PageBuffer>(this->address) + offset; + return GetPointer<PageBuffer>(m_address) + offset; }
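For orientation, Allocate() above asks the bitmap for a random set bit (set meaning free), clears it, and bumps the used/peak counters; Free() below re-sets the bit. A reduced model of the bitmap side (the kernel's KPageBitmap is a multi-level structure and FindFreeBlock(true) randomizes its pick; this sketch is single-level and takes the first free bit):

#include <bit>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

class PageBitmap {
    private:
        std::vector<std::uint64_t> m_words;
    public:
        explicit PageBitmap(std::size_t count) : m_words((count + 63) / 64, 0) { }

        void SetBit(std::size_t i)   { m_words[i / 64] |=  (std::uint64_t{1} << (i % 64)); }
        void ClearBit(std::size_t i) { m_words[i / 64] &= ~(std::uint64_t{1} << (i % 64)); }

        std::optional<std::size_t> FindFreeBlock() const {
            for (std::size_t w = 0; w < m_words.size(); ++w) {
                if (m_words[w] != 0) {
                    return w * 64 + static_cast<std::size_t>(std::countr_zero(m_words[w]));
                }
            }
            return std::nullopt; // The kernel signals this case with a negative ssize_t.
        }
};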
void Free(PageBuffer *pb) { /* Take the lock. */ KScopedInterruptDisable di; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); /* Set the bit for the free page. */ - size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(this->address)) / sizeof(PageBuffer); - this->page_bitmap.SetBit(offset); + size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_address)) / sizeof(PageBuffer); + m_page_bitmap.SetBit(offset); /* Decrement our used count. */ - --this->used; + --m_used; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp index c8ce85326..8d4e05e42 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp @@ -30,28 +30,28 @@ namespace ams::kern { using Impl = impl::KSlabHeapImpl; using PageBuffer = KDynamicPageManager::PageBuffer; private: - Impl impl; - KDynamicPageManager *page_allocator; - std::atomic<size_t> used; - std::atomic<size_t> peak; - std::atomic<size_t> count; - KVirtualAddress address; - size_t size; + Impl m_impl; + KDynamicPageManager *m_page_allocator; + std::atomic<size_t> m_used; + std::atomic<size_t> m_peak; + std::atomic<size_t> m_count; + KVirtualAddress m_address; + size_t m_size; private: ALWAYS_INLINE Impl *GetImpl() { - return std::addressof(this->impl); + return std::addressof(m_impl); } ALWAYS_INLINE const Impl *GetImpl() const { - return std::addressof(this->impl); + return std::addressof(m_impl); } public: - constexpr KDynamicSlabHeap() : impl(), page_allocator(), used(), peak(), count(), address(), size() { /* ... */ } + constexpr KDynamicSlabHeap() : m_impl(), m_page_allocator(), m_used(), m_peak(), m_count(), m_address(), m_size() { /* ... */ } - constexpr KVirtualAddress GetAddress() const { return this->address; } - constexpr size_t GetSize() const { return this->size; } - constexpr size_t GetUsed() const { return this->used; } - constexpr size_t GetPeak() const { return this->peak; } - constexpr size_t GetCount() const { return this->count; } + constexpr KVirtualAddress GetAddress() const { return m_address; } + constexpr size_t GetSize() const { return m_size; } + constexpr size_t GetUsed() const { return m_used; } + constexpr size_t GetPeak() const { return m_peak; } + constexpr size_t GetCount() const { return m_count; } constexpr bool IsInRange(KVirtualAddress addr) const { return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1; @@ -59,22 +59,22 @@ namespace ams::kern { void Initialize(KVirtualAddress memory, size_t sz) { /* Set tracking fields. */ - this->address = memory; - this->count = sz / sizeof(T); - this->size = this->count * sizeof(T); + m_address = memory; + m_count = sz / sizeof(T); + m_size = m_count * sizeof(T); /* Free blocks to memory.
*/ - u8 *cur = GetPointer<u8>(this->address + this->size); - for (size_t i = 0; i < this->count; i++) { + u8 *cur = GetPointer<u8>(m_address + m_size); + for (size_t i = 0; i < m_count; i++) { cur -= sizeof(T); this->GetImpl()->Free(cur); } } void Initialize(KDynamicPageManager *page_allocator) { - this->page_allocator = page_allocator; - this->address = this->page_allocator->GetAddress(); - this->size = this->page_allocator->GetSize(); + m_page_allocator = page_allocator; + m_address = m_page_allocator->GetAddress(); + m_size = m_page_allocator->GetSize(); } void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) { @@ -84,13 +84,13 @@ namespace ams::kern { this->Initialize(page_allocator); /* Allocate until we have the correct number of objects. */ - while (this->count < num_objects) { - auto *allocated = reinterpret_cast<T *>(this->page_allocator->Allocate()); + while (m_count < num_objects) { + auto *allocated = reinterpret_cast<T *>(m_page_allocator->Allocate()); MESOSPHERE_ABORT_UNLESS(allocated != nullptr); for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) { this->GetImpl()->Free(allocated + i); } - this->count += sizeof(PageBuffer) / sizeof(T); + m_count += sizeof(PageBuffer) / sizeof(T); } } @@ -99,14 +99,14 @@ namespace ams::kern { /* If we fail to allocate, try to get a new page from our next allocator. */ if (AMS_UNLIKELY(allocated == nullptr)) { - if (this->page_allocator != nullptr) { - allocated = reinterpret_cast<T *>(this->page_allocator->Allocate()); + if (m_page_allocator != nullptr) { + allocated = reinterpret_cast<T *>(m_page_allocator->Allocate()); if (allocated != nullptr) { /* If we succeeded in getting a page, free the rest to our slab. */ for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) { this->GetImpl()->Free(allocated + i); } - this->count += sizeof(PageBuffer) / sizeof(T); + m_count += sizeof(PageBuffer) / sizeof(T); } } } @@ -116,10 +116,10 @@ namespace ams::kern { new (allocated) T(); /* Update our tracking. */ - size_t used = ++this->used; - size_t peak = this->peak; + size_t used = ++m_used; + size_t peak = m_peak; while (peak < used) { - if (this->peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { + if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { break; } } @@ -130,7 +130,7 @@ namespace ams::kern { void Free(T *t) { this->GetImpl()->Free(t); - --this->used; + --m_used; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp index 2689bcf61..728432146 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp @@ -25,13 +25,13 @@ namespace ams::kern { class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> { MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject); private: - KReadableEvent readable_event; - KWritableEvent writable_event; - KProcess *owner; - bool initialized; + KReadableEvent m_readable_event; + KWritableEvent m_writable_event; + KProcess *m_owner; + bool m_initialized; public: constexpr KEvent() - : readable_event(), writable_event(), owner(), initialized() + : m_readable_event(), m_writable_event(), m_owner(), m_initialized() { /* ... */ }
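One detail worth noting in KDynamicSlabHeap::Allocate() above: used is a plain atomic increment, but peak is updated with a compare-exchange loop, so concurrent allocators can only ever raise the recorded maximum and a stale read can never lower it. The idiom in isolation:

#include <atomic>
#include <cstddef>

void UpdatePeak(std::atomic<std::size_t> &peak, std::size_t used) {
    std::size_t cur = peak.load(std::memory_order_relaxed);
    while (cur < used) {
        // On failure, compare_exchange_weak reloads cur and the loop re-tests.
        if (peak.compare_exchange_weak(cur, used, std::memory_order_relaxed)) {
            break;
        }
    }
}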
@@ -41,15 +41,15 @@ namespace ams::kern { void Initialize(); virtual void Finalize() override; - virtual bool IsInitialized() const override { return this->initialized; } - virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->owner); } + virtual bool IsInitialized() const override { return m_initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_owner); } static void PostDestroy(uintptr_t arg); - virtual KProcess *GetOwner() const override { return this->owner; } + virtual KProcess *GetOwner() const override { return m_owner; } - KReadableEvent &GetReadableEvent() { return this->readable_event; } - KWritableEvent &GetWritableEvent() { return this->writable_event; } + KReadableEvent &GetReadableEvent() { return m_readable_event; } + KWritableEvent &GetWritableEvent() { return m_writable_event; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp index 650a39d67..5e5863116 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp @@ -61,38 +61,38 @@ namespace ams::kern { u16 type; } info; Entry *next_free_entry; - } meta; - KAutoObject *object; + } m_meta; + KAutoObject *m_object; public: - constexpr Entry() : meta(), object(nullptr) { /* ... */ } + constexpr Entry() : m_meta(), m_object(nullptr) { /* ... */ } constexpr ALWAYS_INLINE void SetFree(Entry *next) { - this->object = nullptr; - this->meta.next_free_entry = next; + m_object = nullptr; + m_meta.next_free_entry = next; } constexpr ALWAYS_INLINE void SetUsed(KAutoObject *obj, u16 linear_id, u16 type) { - this->object = obj; - this->meta.info = { linear_id, type }; + m_object = obj; + m_meta.info = { linear_id, type }; } - constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return this->object; } - constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return this->meta.next_free_entry; } - constexpr ALWAYS_INLINE u16 GetLinearId() const { return this->meta.info.linear_id; } - constexpr ALWAYS_INLINE u16 GetType() const { return this->meta.info.type; } + constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return m_object; } + constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return m_meta.next_free_entry; } + constexpr ALWAYS_INLINE u16 GetLinearId() const { return m_meta.info.linear_id; } + constexpr ALWAYS_INLINE u16 GetType() const { return m_meta.info.type; } }; private: - mutable KSpinLock lock; - Entry *table; - Entry *free_head; - Entry entries[MaxTableSize]; - u16 table_size; - u16 max_count; - u16 next_linear_id; - u16 count; + mutable KSpinLock m_lock; + Entry *m_table; + Entry *m_free_head; + Entry m_entries[MaxTableSize]; + u16 m_table_size; + u16 m_max_count; + u16 m_next_linear_id; + u16 m_count; public: constexpr KHandleTable() : - lock(), table(nullptr), free_head(nullptr), entries(), table_size(0), max_count(0), next_linear_id(MinLinearId), count(0) + m_lock(), m_table(nullptr), m_free_head(nullptr), m_entries(), m_table_size(0), m_max_count(0), m_next_linear_id(MinLinearId), m_count(0) { MESOSPHERE_ASSERT_THIS(); } constexpr NOINLINE Result Initialize(s32 size) { @@ -101,26 +101,26 @@ namespace ams::kern { R_UNLESS(size <= static_cast<s32>(MaxTableSize), svc::ResultOutOfMemory()); /* Initialize all fields. */ - this->table = this->entries; - this->table_size = (size <= 0) ?
MaxTableSize : size; - this->next_linear_id = MinLinearId; - this->count = 0; - this->max_count = 0; + m_table = m_entries; + m_table_size = (size <= 0) ? MaxTableSize : size; + m_next_linear_id = MinLinearId; + m_count = 0; + m_max_count = 0; /* Free all entries. */ - for (size_t i = 0; i < static_cast<size_t>(this->table_size - 1); i++) { - this->entries[i].SetFree(std::addressof(this->entries[i + 1])); + for (size_t i = 0; i < static_cast<size_t>(m_table_size - 1); i++) { + m_entries[i].SetFree(std::addressof(m_entries[i + 1])); } - this->entries[this->table_size - 1].SetFree(nullptr); + m_entries[m_table_size - 1].SetFree(nullptr); - this->free_head = std::addressof(this->entries[0]); + m_free_head = std::addressof(m_entries[0]); return ResultSuccess(); } - constexpr ALWAYS_INLINE size_t GetTableSize() const { return this->table_size; } - constexpr ALWAYS_INLINE size_t GetCount() const { return this->count; } - constexpr ALWAYS_INLINE size_t GetMaxCount() const { return this->max_count; } + constexpr ALWAYS_INLINE size_t GetTableSize() const { return m_table_size; } + constexpr ALWAYS_INLINE size_t GetCount() const { return m_count; } + constexpr ALWAYS_INLINE size_t GetMaxCount() const { return m_max_count; } NOINLINE Result Finalize(); NOINLINE bool Remove(ams::svc::Handle handle); @@ -129,7 +129,7 @@ namespace ams::kern { ALWAYS_INLINE KScopedAutoObject<T> GetObjectWithoutPseudoHandle(ams::svc::Handle handle) const { /* Lock and look up in table. */ KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); if constexpr (std::is_same<T, KAutoObject>::value) { return this->GetObjectImpl(handle); @@ -163,7 +163,7 @@ namespace ams::kern { KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(ams::svc::Handle handle) const { /* Lock and look up in table. */ KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); KAutoObject *obj = this->GetObjectImpl(handle); if (AMS_LIKELY(obj != nullptr)) { @@ -190,7 +190,7 @@ namespace ams::kern { ALWAYS_INLINE KScopedAutoObject<KAutoObject> GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const { MESOSPHERE_ASSERT_THIS(); KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); return this->GetObjectByIndexImpl(out_handle, index); }
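Context for these lookups: a handle packs a table index in its low bits and a nonzero generation (the linear id) in the bits above, which is why the lookup helpers below reject a zero linear id or an out-of-range index before ever comparing the entry's stored id. A sketch of that unpacking; the 15-bit index width is an assumption for illustration, since the real constants live in parts of this header the diff does not show:

#include <cstdint>

constexpr std::uint32_t HandleIndexBits = 15; // assumed width, illustration only

constexpr std::uint32_t GetHandleIndex(std::uint32_t handle) {
    return handle & ((1u << HandleIndexBits) - 1);
}

constexpr std::uint32_t GetHandleLinearId(std::uint32_t handle) {
    return handle >> HandleIndexBits;
}

constexpr bool IsPlausibleHandle(std::uint32_t handle, std::uint32_t table_size) {
    return GetHandleLinearId(handle) != 0 && GetHandleIndex(handle) < table_size;
}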
@@ -217,7 +217,7 @@ namespace ams::kern { { /* Lock the table. */ KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); for (num_opened = 0; num_opened < num_handles; num_opened++) { /* Get the current handle. */ const auto cur_handle = handles[num_opened]; @@ -258,38 +258,38 @@ namespace ams::kern { constexpr ALWAYS_INLINE Entry *AllocateEntry() { MESOSPHERE_ASSERT_THIS(); - MESOSPHERE_ASSERT(this->count < this->table_size); + MESOSPHERE_ASSERT(m_count < m_table_size); - Entry *entry = this->free_head; - this->free_head = entry->GetNextFreeEntry(); + Entry *entry = m_free_head; + m_free_head = entry->GetNextFreeEntry(); - this->count++; - this->max_count = std::max(this->max_count, this->count); + m_count++; + m_max_count = std::max(m_max_count, m_count); return entry; } constexpr ALWAYS_INLINE void FreeEntry(Entry *entry) { MESOSPHERE_ASSERT_THIS(); - MESOSPHERE_ASSERT(this->count > 0); + MESOSPHERE_ASSERT(m_count > 0); - entry->SetFree(this->free_head); - this->free_head = entry; + entry->SetFree(m_free_head); + m_free_head = entry; - this->count--; + m_count--; } constexpr ALWAYS_INLINE u16 AllocateLinearId() { - const u16 id = this->next_linear_id++; - if (this->next_linear_id > MaxLinearId) { - this->next_linear_id = MinLinearId; + const u16 id = m_next_linear_id++; + if (m_next_linear_id > MaxLinearId) { + m_next_linear_id = MinLinearId; } return id; } constexpr ALWAYS_INLINE size_t GetEntryIndex(Entry *entry) { - const size_t index = entry - this->table; - MESOSPHERE_ASSERT(index < this->table_size); + const size_t index = entry - m_table; + MESOSPHERE_ASSERT(index < m_table_size); return index; } @@ -311,12 +311,12 @@ namespace ams::kern { if (linear_id == 0) { return nullptr; } - if (index >= this->table_size) { + if (index >= m_table_size) { return nullptr; } /* Get the entry, and ensure our serial id is correct. */ - Entry *entry = std::addressof(this->table[index]); + Entry *entry = std::addressof(m_table[index]); if (entry->GetObject() == nullptr) { return nullptr; } @@ -346,12 +346,12 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Index must be in bounds. */ - if (index >= this->table_size || this->table == nullptr) { + if (index >= m_table_size || m_table == nullptr) { return nullptr; } /* Ensure entry has an object. */ - Entry *entry = std::addressof(this->table[index]); + Entry *entry = std::addressof(m_table[index]); if (entry->GetObject() == nullptr) { return nullptr; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp index c7f29758e..9da5fde41 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp @@ -24,41 +24,41 @@ namespace ams::kern { private: using TimerTaskTree = util::IntrusiveRedBlackTreeBaseTraits<KTimerTask>::TreeType; private: - KSpinLock lock; - TimerTaskTree task_tree; - KTimerTask *next_task; + KSpinLock m_lock; + TimerTaskTree m_task_tree; + KTimerTask *m_next_task; public: - constexpr ALWAYS_INLINE KHardwareTimerBase() : lock(), task_tree(), next_task(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE KHardwareTimerBase() : m_lock(), m_task_tree(), m_next_task(nullptr) { /* ... */ } private: ALWAYS_INLINE void RemoveTaskFromTree(KTimerTask *task) { /* Erase from the tree. */ - auto it = this->task_tree.erase(this->task_tree.iterator_to(*task)); + auto it = m_task_tree.erase(m_task_tree.iterator_to(*task)); /* Clear the task's scheduled time. */ task->SetTime(0); /* Update our next task if relevant. */ - if (this->next_task == task) { - this->next_task = (it != this->task_tree.end()) ?
std::addressof(*it) : nullptr; + if (m_next_task == task) { + m_next_task = (it != m_task_tree.end()) ? std::addressof(*it) : nullptr; } } public: NOINLINE void CancelTask(KTimerTask *task) { KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); if (const s64 task_time = task->GetTime(); task_time > 0) { this->RemoveTaskFromTree(task); } } protected: - ALWAYS_INLINE KSpinLock &GetLock() { return this->lock; } + ALWAYS_INLINE KSpinLock &GetLock() { return m_lock; } ALWAYS_INLINE s64 DoInterruptTaskImpl(s64 cur_time) { /* We want to handle all tasks, returning the next time that a task is scheduled. */ while (true) { /* Get the next task. If there isn't one, return 0. */ - KTimerTask *task = this->next_task; + KTimerTask *task = m_next_task; if (task == nullptr) { return 0; } @@ -81,13 +81,13 @@ namespace ams::kern { /* Set the task's time, and insert it into our tree. */ task->SetTime(task_time); - this->task_tree.insert(*task); + m_task_tree.insert(*task); /* Update our next task if relevant. */ - if (this->next_task != nullptr && this->next_task->GetTime() <= task_time) { + if (m_next_task != nullptr && m_next_task->GetTime() <= task_time) { return false; } - this->next_task = task; + m_next_task = task; return true; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp index bde2dddb3..db3d58e6b 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp @@ -24,103 +24,103 @@ namespace ams::kern { private: static constexpr u32 Magic = util::FourCC<'K','I','P','1'>::Code; private: - u32 magic; - u8 name[12]; - u64 program_id; - u32 version; - u8 priority; - u8 ideal_core_id; - u8 _1E; - u8 flags; - u32 rx_address; - u32 rx_size; - u32 rx_compressed_size; - u32 affinity_mask; - u32 ro_address; - u32 ro_size; - u32 ro_compressed_size; - u32 stack_size; - u32 rw_address; - u32 rw_size; - u32 rw_compressed_size; - u32 _4C; - u32 bss_address; - u32 bss_size; - u32 pad[(0x80 - 0x58) / sizeof(u32)]; - u32 capabilities[0x80 / sizeof(u32)]; + u32 m_magic; + u8 m_name[12]; + u64 m_program_id; + u32 m_version; + u8 m_priority; + u8 m_ideal_core_id; + u8 m_1E; + u8 m_flags; + u32 m_rx_address; + u32 m_rx_size; + u32 m_rx_compressed_size; + u32 m_affinity_mask; + u32 m_ro_address; + u32 m_ro_size; + u32 m_ro_compressed_size; + u32 m_stack_size; + u32 m_rw_address; + u32 m_rw_size; + u32 m_rw_compressed_size; + u32 m_4C; + u32 m_bss_address; + u32 m_bss_size; + u32 m_pad[(0x80 - 0x58) / sizeof(u32)]; + u32 m_capabilities[0x80 / sizeof(u32)]; public: - constexpr bool IsValid() const { return this->magic == Magic; } + constexpr bool IsValid() const { return m_magic == Magic; } constexpr void GetName(char *dst, size_t size) const { std::memset(dst, 0, size); - std::memcpy(dst, this->name, std::min(sizeof(this->name), size)); + std::memcpy(dst, m_name, std::min(sizeof(m_name), size)); } - constexpr const u32 *GetCapabilities() const { return this->capabilities; } - constexpr size_t GetNumCapabilities() const { return util::size(this->capabilities); } + constexpr const u32 *GetCapabilities() const { return m_capabilities; } + constexpr size_t GetNumCapabilities() const { return util::size(m_capabilities); } - constexpr u64 GetProgramId() const { return this->program_id; } - constexpr u32 GetVersion() const { return this->version; } - 
constexpr u8 GetPriority() const { return this->priority; } - constexpr u8 GetIdealCoreId() const { return this->ideal_core_id; } + constexpr u64 GetProgramId() const { return m_program_id; } + constexpr u32 GetVersion() const { return m_version; } + constexpr u8 GetPriority() const { return m_priority; } + constexpr u8 GetIdealCoreId() const { return m_ideal_core_id; } - constexpr bool IsRxCompressed() const { return (this->flags & (1 << 0)); } - constexpr bool IsRoCompressed() const { return (this->flags & (1 << 1)); } - constexpr bool IsRwCompressed() const { return (this->flags & (1 << 2)); } - constexpr bool Is64Bit() const { return (this->flags & (1 << 3)); } - constexpr bool Is64BitAddressSpace() const { return (this->flags & (1 << 4)); } - constexpr bool UsesSecureMemory() const { return (this->flags & (1 << 5)); } + constexpr bool IsRxCompressed() const { return (m_flags & (1 << 0)); } + constexpr bool IsRoCompressed() const { return (m_flags & (1 << 1)); } + constexpr bool IsRwCompressed() const { return (m_flags & (1 << 2)); } + constexpr bool Is64Bit() const { return (m_flags & (1 << 3)); } + constexpr bool Is64BitAddressSpace() const { return (m_flags & (1 << 4)); } + constexpr bool UsesSecureMemory() const { return (m_flags & (1 << 5)); } - constexpr u32 GetRxAddress() const { return this->rx_address; } - constexpr u32 GetRxSize() const { return this->rx_size; } - constexpr u32 GetRxCompressedSize() const { return this->rx_compressed_size; } - constexpr u32 GetRoAddress() const { return this->ro_address; } - constexpr u32 GetRoSize() const { return this->ro_size; } - constexpr u32 GetRoCompressedSize() const { return this->ro_compressed_size; } - constexpr u32 GetRwAddress() const { return this->rw_address; } - constexpr u32 GetRwSize() const { return this->rw_size; } - constexpr u32 GetRwCompressedSize() const { return this->rw_compressed_size; } - constexpr u32 GetBssAddress() const { return this->bss_address; } - constexpr u32 GetBssSize() const { return this->bss_size; } + constexpr u32 GetRxAddress() const { return m_rx_address; } + constexpr u32 GetRxSize() const { return m_rx_size; } + constexpr u32 GetRxCompressedSize() const { return m_rx_compressed_size; } + constexpr u32 GetRoAddress() const { return m_ro_address; } + constexpr u32 GetRoSize() const { return m_ro_size; } + constexpr u32 GetRoCompressedSize() const { return m_ro_compressed_size; } + constexpr u32 GetRwAddress() const { return m_rw_address; } + constexpr u32 GetRwSize() const { return m_rw_size; } + constexpr u32 GetRwCompressedSize() const { return m_rw_compressed_size; } + constexpr u32 GetBssAddress() const { return m_bss_address; } + constexpr u32 GetBssSize() const { return m_bss_size; } - constexpr u32 GetAffinityMask() const { return this->affinity_mask; } - constexpr u32 GetStackSize() const { return this->stack_size; } + constexpr u32 GetAffinityMask() const { return m_affinity_mask; } + constexpr u32 GetStackSize() const { return m_stack_size; } }; static_assert(sizeof(KInitialProcessHeader) == 0x100); class KInitialProcessReader { private: - KInitialProcessHeader *kip_header; + KInitialProcessHeader *m_kip_header; public: - constexpr KInitialProcessReader() : kip_header() { /* ... */ } + constexpr KInitialProcessReader() : m_kip_header() { /* ... 
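A summary of the header flag bits tested above, with positions taken directly from the accessors (the enum names are illustrative, not from the codebase):

#include <cstdint>

enum KipFlag : std::uint8_t {
    KipFlag_TextCompressed    = 1u << 0, // IsRxCompressed()
    KipFlag_RoCompressed      = 1u << 1, // IsRoCompressed()
    KipFlag_DataCompressed    = 1u << 2, // IsRwCompressed()
    KipFlag_64Bit             = 1u << 3, // Is64Bit()
    KipFlag_64BitAddressSpace = 1u << 4, // Is64BitAddressSpace()
    KipFlag_UsesSecureMemory  = 1u << 5, // UsesSecureMemory()
};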
*/ } - constexpr const u32 *GetCapabilities() const { return this->kip_header->GetCapabilities(); } - constexpr size_t GetNumCapabilities() const { return this->kip_header->GetNumCapabilities(); } + constexpr const u32 *GetCapabilities() const { return m_kip_header->GetCapabilities(); } + constexpr size_t GetNumCapabilities() const { return m_kip_header->GetNumCapabilities(); } constexpr size_t GetBinarySize() const { - return sizeof(*kip_header) + this->kip_header->GetRxCompressedSize() + this->kip_header->GetRoCompressedSize() + this->kip_header->GetRwCompressedSize(); + return sizeof(*m_kip_header) + m_kip_header->GetRxCompressedSize() + m_kip_header->GetRoCompressedSize() + m_kip_header->GetRwCompressedSize(); } constexpr size_t GetSize() const { - if (const size_t bss_size = this->kip_header->GetBssSize(); bss_size != 0) { - return this->kip_header->GetBssAddress() + this->kip_header->GetBssSize(); + if (const size_t bss_size = m_kip_header->GetBssSize(); bss_size != 0) { + return m_kip_header->GetBssAddress() + m_kip_header->GetBssSize(); } else { - return this->kip_header->GetRwAddress() + this->kip_header->GetRwSize(); + return m_kip_header->GetRwAddress() + m_kip_header->GetRwSize(); } } - constexpr u8 GetPriority() const { return this->kip_header->GetPriority(); } - constexpr u8 GetIdealCoreId() const { return this->kip_header->GetIdealCoreId(); } - constexpr u32 GetAffinityMask() const { return this->kip_header->GetAffinityMask(); } - constexpr u32 GetStackSize() const { return this->kip_header->GetStackSize(); } + constexpr u8 GetPriority() const { return m_kip_header->GetPriority(); } + constexpr u8 GetIdealCoreId() const { return m_kip_header->GetIdealCoreId(); } + constexpr u32 GetAffinityMask() const { return m_kip_header->GetAffinityMask(); } + constexpr u32 GetStackSize() const { return m_kip_header->GetStackSize(); } - constexpr bool Is64Bit() const { return this->kip_header->Is64Bit(); } - constexpr bool Is64BitAddressSpace() const { return this->kip_header->Is64BitAddressSpace(); } - constexpr bool UsesSecureMemory() const { return this->kip_header->UsesSecureMemory(); } + constexpr bool Is64Bit() const { return m_kip_header->Is64Bit(); } + constexpr bool Is64BitAddressSpace() const { return m_kip_header->Is64BitAddressSpace(); } + constexpr bool UsesSecureMemory() const { return m_kip_header->UsesSecureMemory(); } bool Attach(u8 *bin) { if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) { - this->kip_header = header; + m_kip_header = header; return true; } else { return false; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp index ab58e75ef..acc72dc0d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp @@ -27,10 +27,10 @@ namespace ams::kern { class KInterruptEvent final : public KAutoObjectWithSlabHeapAndContainer<KInterruptEvent, KReadableEvent> { MESOSPHERE_AUTOOBJECT_TRAITS(KInterruptEvent, KReadableEvent); private: - s32 interrupt_id; - bool is_initialized; + s32 m_interrupt_id; + bool m_is_initialized; public: - constexpr KInterruptEvent() : interrupt_id(-1), is_initialized(false) { /* ... */ } + constexpr KInterruptEvent() : m_interrupt_id(-1), m_is_initialized(false) { /* ... */ } virtual ~KInterruptEvent() { /* ... */ }
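Back in KInitialProcessReader above, Attach() accepts a binary only once IsValid() has matched the leading word against util::FourCC<'K','I','P','1'>::Code. The same check in standalone form, assuming the FourCC packs its characters little-endian with 'K' in the lowest byte (memcpy sidesteps alignment and aliasing concerns when peeking at the blob):

#include <cstdint>
#include <cstring>

bool IsKip1(const unsigned char *bin) {
    std::uint32_t magic;
    std::memcpy(&magic, bin, sizeof(magic));
    return magic == (std::uint32_t{'K'} | (std::uint32_t{'I'} << 8) |
                     (std::uint32_t{'P'} << 16) | (std::uint32_t{'1'} << 24));
}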
*/ } Result Initialize(int32_t interrupt_name, ams::svc::InterruptType type); @@ -38,22 +38,22 @@ namespace ams::kern { virtual Result Reset() override; - virtual bool IsInitialized() const override { return this->is_initialized; } + virtual bool IsInitialized() const override { return m_is_initialized; } static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ } - constexpr s32 GetInterruptId() const { return this->interrupt_id; } + constexpr s32 GetInterruptId() const { return m_interrupt_id; } }; class KInterruptEventTask : public KSlabAllocated, public KInterruptTask { private: - KInterruptEvent *event; - KLightLock lock; + KInterruptEvent *m_event; + KLightLock m_lock; public: - constexpr KInterruptEventTask() : event(nullptr), lock() { /* ... */ } + constexpr KInterruptEventTask() : m_event(nullptr), m_lock() { /* ... */ } ~KInterruptEventTask() { /* ... */ } - KLightLock &GetLock() { return this->lock; } + KLightLock &GetLock() { return m_lock; } virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override; virtual void DoTask() override; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp index 01b0fae6b..bd39ba872 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp @@ -26,16 +26,16 @@ namespace ams::kern { class KInterruptTask : public KInterruptHandler { private: - KInterruptTask *next_task; + KInterruptTask *m_next_task; public: - constexpr ALWAYS_INLINE KInterruptTask() : next_task(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE KInterruptTask() : m_next_task(nullptr) { /* ... */ } constexpr ALWAYS_INLINE KInterruptTask *GetNextTask() const { - return this->next_task; + return m_next_task; } constexpr ALWAYS_INLINE void SetNextTask(KInterruptTask *t) { - this->next_task = t; + m_next_task = t; } virtual void DoTask() = 0; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp index d3c4d5113..dd1d95c4d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp @@ -24,28 +24,28 @@ namespace ams::kern { private: class TaskQueue { private: - KInterruptTask *head; - KInterruptTask *tail; + KInterruptTask *m_head; + KInterruptTask *m_tail; public: - constexpr TaskQueue() : head(nullptr), tail(nullptr) { /* ... */ } + constexpr TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ } - constexpr KInterruptTask *GetHead() { return this->head; } - constexpr bool IsEmpty() const { return this->head == nullptr; } - constexpr void Clear() { this->head = nullptr; this->tail = nullptr; } + constexpr KInterruptTask *GetHead() { return m_head; } + constexpr bool IsEmpty() const { return m_head == nullptr; } + constexpr void Clear() { m_head = nullptr; m_tail = nullptr; } void Enqueue(KInterruptTask *task); void Dequeue(); }; private: - TaskQueue task_queue; - KThread *thread; + TaskQueue m_task_queue; + KThread *m_thread; private: static void ThreadFunction(uintptr_t arg); void ThreadFunctionImpl(); public: - constexpr KInterruptTaskManager() : task_queue(), thread(nullptr) { /* ... */ } + constexpr KInterruptTaskManager() : m_task_queue(), m_thread(nullptr) { /* ... 
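
KInterruptTaskManager's TaskQueue above threads an intrusive singly-linked list through KInterruptTask::m_next_task; Enqueue and Dequeue are only declared in this header. A hedged sketch of that pattern (Task and TaskQueue here are stand-ins under my own assumptions, not the kernel's implementation):

#include <cassert>
#include <cstddef>

/* Minimal stand-in for KInterruptTask's intrusive next pointer. */
struct Task {
    Task *next = nullptr;
};

/* Sketch of the TaskQueue pattern: O(1) enqueue at the tail, O(1) dequeue at
   the head, and no allocation, since the links live inside the tasks. */
class TaskQueue {
    Task *head = nullptr;
    Task *tail = nullptr;
public:
    void Enqueue(Task *t) {
        t->next = nullptr;
        if (tail != nullptr) {
            tail->next = t;
        } else {
            head = t;
        }
        tail = t;
    }
    Task *Dequeue() {
        Task *t = head;
        if (t != nullptr) {
            head = t->next;
            if (head == nullptr) { tail = nullptr; }
        }
        return t;
    }
    bool IsEmpty() const { return head == nullptr; }
};

int main() {
    Task a, b;
    TaskQueue q;
    q.Enqueue(&a);
    q.Enqueue(&b);
    assert(q.Dequeue() == &a && q.Dequeue() == &b && q.IsEmpty());
    return 0;
}
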
*/ } - constexpr KThread *GetThread() const { return this->thread; } + constexpr KThread *GetThread() const { return m_thread; } NOINLINE void Initialize(); void EnqueueTask(KInterruptTask *task); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp index 93eb7ae77..bb55ad577 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp @@ -24,20 +24,20 @@ namespace ams::kern { class KLightClientSession final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KLightClientSession, KAutoObject); private: - KLightSession *parent; + KLightSession *m_parent; public: - constexpr KLightClientSession() : parent() { /* ... */ } + constexpr KLightClientSession() : m_parent() { /* ... */ } virtual ~KLightClientSession() { /* ... */ } void Initialize(KLightSession *parent) { /* Set member variables. */ - this->parent = parent; + m_parent = parent; } virtual void Destroy() override; static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ } - constexpr const KLightSession *GetParent() const { return this->parent; } + constexpr const KLightSession *GetParent() const { return m_parent; } Result SendSyncRequest(u32 *data); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp index a05e387a3..5fdc7ac85 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp @@ -24,9 +24,9 @@ namespace ams::kern { class KLightConditionVariable { private: - KThreadQueue thread_queue; + KThreadQueue m_thread_queue; public: - constexpr ALWAYS_INLINE KLightConditionVariable() : thread_queue() { /* ... */ } + constexpr ALWAYS_INLINE KLightConditionVariable() : m_thread_queue() { /* ... */ } private: void WaitImpl(KLightLock *lock, s64 timeout) { KThread *owner = GetCurrentThreadPointer(); @@ -37,7 +37,7 @@ namespace ams::kern { KScopedSchedulerLockAndSleep lk(&timer, owner, timeout); lock->Unlock(); - if (!this->thread_queue.SleepThread(owner)) { + if (!m_thread_queue.SleepThread(owner)) { lk.CancelSleep(); return; } @@ -56,7 +56,7 @@ namespace ams::kern { void Broadcast() { KScopedSchedulerLock lk; - while (this->thread_queue.WakeupFrontThread() != nullptr) { + while (m_thread_queue.WakeupFrontThread() != nullptr) { /* We want to signal all threads, and so should continue waking up until there's nothing to wake. */ } } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp index 0949a5902..c25c963a2 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp @@ -23,9 +23,9 @@ namespace ams::kern { class KLightLock { private: - std::atomic tag; + std::atomic m_tag; public: - constexpr KLightLock() : tag(0) { /* ... */ } + constexpr KLightLock() : m_tag(0) { /* ... 
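
KLightConditionVariable's WaitImpl above drops the lock and goes to sleep as one step under the scheduler lock, and Broadcast wakes every sleeping thread. A rough analogue of that protocol in standard C++; std::condition_variable stands in for the KThreadQueue, which the kernel uses instead:

#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    std::thread waiter([&] {
        std::unique_lock lk(m);
        /* wait() atomically unlocks and sleeps -- mirroring lock->Unlock()
           followed by SleepThread() under KScopedSchedulerLockAndSleep. */
        cv.wait(lk, [&] { return ready; });
    });

    {
        std::lock_guard lk(m);
        ready = true;
    }
    cv.notify_all();  /* analogous to Broadcast(): wake until nothing is left to wake */
    waiter.join();
    return 0;
}
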
*/ } void Lock() { MESOSPHERE_ASSERT_THIS(); @@ -34,9 +34,9 @@ namespace ams::kern { const uintptr_t cur_thread_tag = (cur_thread | 1); while (true) { - uintptr_t old_tag = this->tag.load(std::memory_order_relaxed); + uintptr_t old_tag = m_tag.load(std::memory_order_relaxed); - while (!this->tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { + while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { if ((old_tag | 1) == cur_thread_tag) { return; } @@ -59,14 +59,14 @@ namespace ams::kern { if (expected != cur_thread) { return this->UnlockSlowPath(cur_thread); } - } while (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release)); + } while (!m_tag.compare_exchange_weak(expected, 0, std::memory_order_release)); } void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); void UnlockSlowPath(uintptr_t cur_thread); - bool IsLocked() const { return this->tag != 0; } - bool IsLockedByCurrentThread() const { return (this->tag | 0x1ul) == (reinterpret_cast(GetCurrentThreadPointer()) | 0x1ul); } + bool IsLocked() const { return m_tag != 0; } + bool IsLockedByCurrentThread() const { return (m_tag | 0x1ul) == (reinterpret_cast(GetCurrentThreadPointer()) | 0x1ul); } }; using KScopedLightLock = KScopedLock; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp index 7dd58c031..8d8148c31 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp @@ -26,24 +26,24 @@ namespace ams::kern { class KLightServerSession final : public KAutoObjectWithSlabHeapAndContainer, public util::IntrusiveListBaseNode { MESOSPHERE_AUTOOBJECT_TRAITS(KLightServerSession, KAutoObject); private: - KLightSession *parent; - KThreadQueue request_queue; - KThreadQueue server_queue; - KThread *current_request; - KThread *server_thread; + KLightSession *m_parent; + KThreadQueue m_request_queue; + KThreadQueue m_server_queue; + KThread *m_current_request; + KThread *m_server_thread; public: - constexpr KLightServerSession() : parent(), request_queue(), server_queue(), current_request(), server_thread() { /* ... */ } + constexpr KLightServerSession() : m_parent(), m_request_queue(), m_server_queue(), m_current_request(), m_server_thread() { /* ... */ } virtual ~KLightServerSession() { /* ... */ } void Initialize(KLightSession *parent) { /* Set member variables. */ - this->parent = parent; + m_parent = parent; } virtual void Destroy() override; static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... 
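
KLightLock above packs the owner thread pointer and a low "contended" bit into a single atomic word: 0 means free, the owner pointer means held and uncontended, and owner|1 means waiters exist. A minimal standalone sketch of the fast paths; the sleeping/waking slow paths are omitted, and cur_thread is a plain integer standing in for GetCurrentThreadPointer():

#include <atomic>
#include <cstdint>

class LightLock {
    std::atomic<uintptr_t> tag{0};
public:
    bool TryLock(uintptr_t cur_thread) {
        uintptr_t expected = 0;
        /* Acquire ordering so the new owner observes writes made under the lock. */
        return tag.compare_exchange_strong(expected, cur_thread, std::memory_order_acquire);
    }
    bool Unlock(uintptr_t cur_thread) {
        uintptr_t expected = cur_thread;
        /* If the tag is no longer exactly cur_thread, the contended bit was set
           and a slow path would need to wake a waiter. */
        return tag.compare_exchange_strong(expected, 0, std::memory_order_release);
    }
    bool IsLockedByCurrentThread(uintptr_t cur_thread) const {
        /* OR-ing in the low bit makes the test insensitive to contention state. */
        return (tag.load(std::memory_order_relaxed) | 1) == (cur_thread | 1);
    }
};

int main() {
    LightLock l;
    const uintptr_t me = 0x1000;  /* stand-in for GetCurrentThreadPointer() */
    return (l.TryLock(me) && l.IsLockedByCurrentThread(me) && l.Unlock(me)) ? 0 : 1;
}
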
*/ } - constexpr const KLightSession *GetParent() const { return this->parent; } + constexpr const KLightSession *GetParent() const { return m_parent; } Result OnRequest(KThread *request_thread); Result ReplyAndReceive(u32 *data); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp index d37f69b48..4b0c46552 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp @@ -38,16 +38,16 @@ namespace ams::kern { static constexpr size_t DataSize = sizeof(u32) * 7; static constexpr u32 ReplyFlag = (1u << (BITSIZEOF(u32) - 1)); private: - KLightServerSession server; - KLightClientSession client; - State state; - KClientPort *port; - uintptr_t name; - KProcess *process; - bool initialized; + KLightServerSession m_server; + KLightClientSession m_client; + State m_state; + KClientPort *m_port; + uintptr_t m_name; + KProcess *m_process; + bool m_initialized; public: constexpr KLightSession() - : server(), client(), state(State::Invalid), port(), name(), process(), initialized() + : m_server(), m_client(), m_state(State::Invalid), m_port(), m_name(), m_process(), m_initialized() { /* ... */ } @@ -57,23 +57,23 @@ namespace ams::kern { void Initialize(KClientPort *client_port, uintptr_t name); virtual void Finalize() override; - virtual bool IsInitialized() const override { return this->initialized; } - virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(this->process); } + virtual bool IsInitialized() const override { return m_initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(m_process); } static void PostDestroy(uintptr_t arg); void OnServerClosed(); void OnClientClosed(); - bool IsServerClosed() const { return this->state != State::Normal; } - bool IsClientClosed() const { return this->state != State::Normal; } + bool IsServerClosed() const { return m_state != State::Normal; } + bool IsClientClosed() const { return m_state != State::Normal; } - Result OnRequest(KThread *request_thread) { return this->server.OnRequest(request_thread); } + Result OnRequest(KThread *request_thread) { return m_server.OnRequest(request_thread); } - KLightClientSession &GetClientSession() { return this->client; } - KLightServerSession &GetServerSession() { return this->server; } - const KLightClientSession &GetClientSession() const { return this->client; } - const KLightServerSession &GetServerSession() const { return this->server; } + KLightClientSession &GetClientSession() { return m_client; } + KLightServerSession &GetServerSession() { return m_server; } + const KLightClientSession &GetClientSession() const { return m_client; } + const KLightServerSession &GetServerSession() const { return m_server; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp index 23f86fe66..2ed4cb900 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp @@ -22,17 +22,17 @@ namespace ams::kern { class KLinkedListNode : public util::IntrusiveListBaseNode, public KSlabAllocated { private: - void *item; + void *m_item; public: - constexpr KLinkedListNode() : util::IntrusiveListBaseNode(), item(nullptr) { MESOSPHERE_ASSERT_THIS(); } + constexpr KLinkedListNode() : 
util::IntrusiveListBaseNode(), m_item(nullptr) { MESOSPHERE_ASSERT_THIS(); } constexpr void Initialize(void *it) { MESOSPHERE_ASSERT_THIS(); - this->item = it; + m_item = it; } constexpr void *GetItem() const { - return this->item; + return m_item; } }; static_assert(sizeof(KLinkedListNode) == sizeof(util::IntrusiveListNode) + sizeof(void *)); @@ -69,16 +69,16 @@ namespace ams::kern { using pointer = typename std::conditional::type; using reference = typename std::conditional::type; private: - BaseIterator base_it; + BaseIterator m_base_it; public: - explicit Iterator(BaseIterator it) : base_it(it) { /* ... */ } + explicit Iterator(BaseIterator it) : m_base_it(it) { /* ... */ } pointer GetItem() const { - return static_cast(this->base_it->GetItem()); + return static_cast(m_base_it->GetItem()); } bool operator==(const Iterator &rhs) const { - return this->base_it == rhs.base_it; + return m_base_it == rhs.m_base_it; } bool operator!=(const Iterator &rhs) const { @@ -94,12 +94,12 @@ namespace ams::kern { } Iterator &operator++() { - ++this->base_it; + ++m_base_it; return *this; } Iterator &operator--() { - --this->base_it; + --m_base_it; return *this; } @@ -116,7 +116,7 @@ namespace ams::kern { } operator Iterator() const { - return Iterator(this->base_it); + return Iterator(m_base_it); } }; public: @@ -205,7 +205,7 @@ namespace ams::kern { KLinkedListNode *node = KLinkedListNode::Allocate(); MESOSPHERE_ABORT_UNLESS(node != nullptr); node->Initialize(std::addressof(ref)); - return iterator(BaseList::insert(pos.base_it, *node)); + return iterator(BaseList::insert(pos.m_base_it, *node)); } void push_back(reference ref) { @@ -225,8 +225,8 @@ namespace ams::kern { } iterator erase(const iterator pos) { - KLinkedListNode *freed_node = std::addressof(*pos.base_it); - iterator ret = iterator(BaseList::erase(pos.base_it)); + KLinkedListNode *freed_node = std::addressof(*pos.m_base_it); + iterator ret = iterator(BaseList::erase(pos.m_base_it)); KLinkedListNode::Free(freed_node); return ret; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp index 14e233d35..a262e99b6 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp @@ -189,38 +189,38 @@ namespace ams::kern { }; struct KMemoryInfo { - uintptr_t address; - size_t size; - KMemoryState state; - u16 device_disable_merge_left_count; - u16 device_disable_merge_right_count; - u16 ipc_lock_count; - u16 device_use_count; - u16 ipc_disable_merge_count; - KMemoryPermission perm; - KMemoryAttribute attribute; - KMemoryPermission original_perm; - KMemoryBlockDisableMergeAttribute disable_merge_attribute; + uintptr_t m_address; + size_t m_size; + KMemoryState m_state; + u16 m_device_disable_merge_left_count; + u16 m_device_disable_merge_right_count; + u16 m_ipc_lock_count; + u16 m_device_use_count; + u16 m_ipc_disable_merge_count; + KMemoryPermission m_perm; + KMemoryAttribute m_attribute; + KMemoryPermission m_original_perm; + KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; constexpr ams::svc::MemoryInfo GetSvcMemoryInfo() const { return { - .addr = this->address, - .size = this->size, - .state = static_cast(this->state & KMemoryState_Mask), - .attr = static_cast(this->attribute & KMemoryAttribute_UserMask), - .perm = static_cast(this->perm & KMemoryPermission_UserMask), - .ipc_refcount = this->ipc_lock_count, - .device_refcount = this->device_use_count, 
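
KLinkedList's Iterator above wraps a base iterator over KLinkedListNode and recovers the typed item with a cast. A minimal analogue of that adapter, with std::list<void *> standing in for the intrusive node list:

#include <cassert>
#include <list>

template<typename T>
class ItemIterator {
    std::list<void *>::iterator base_it;
public:
    explicit ItemIterator(std::list<void *>::iterator it) : base_it(it) { /* ... */ }
    T *GetItem() const { return static_cast<T *>(*base_it); }
    ItemIterator &operator++() { ++base_it; return *this; }
    bool operator!=(const ItemIterator &rhs) const { return base_it != rhs.base_it; }
};

int main() {
    int x = 42, y = 7;
    std::list<void *> nodes{&x, &y};
    int sum = 0;
    for (ItemIterator<int> it(nodes.begin()), end(nodes.end()); it != end; ++it) {
        sum += *it.GetItem();
    }
    assert(sum == 49);
    return 0;
}
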
+ .addr = m_address, + .size = m_size, + .state = static_cast(m_state & KMemoryState_Mask), + .attr = static_cast(m_attribute & KMemoryAttribute_UserMask), + .perm = static_cast(m_perm & KMemoryPermission_UserMask), + .ipc_refcount = m_ipc_lock_count, + .device_refcount = m_device_use_count, .padding = {}, }; } constexpr uintptr_t GetAddress() const { - return this->address; + return m_address; } constexpr size_t GetSize() const { - return this->size; + return m_size; } constexpr size_t GetNumPages() const { @@ -236,48 +236,48 @@ namespace ams::kern { } constexpr u16 GetIpcLockCount() const { - return this->ipc_lock_count; + return m_ipc_lock_count; } constexpr u16 GetIpcDisableMergeCount() const { - return this->ipc_disable_merge_count; + return m_ipc_disable_merge_count; } constexpr KMemoryState GetState() const { - return this->state; + return m_state; } constexpr KMemoryPermission GetPermission() const { - return this->perm; + return m_perm; } constexpr KMemoryPermission GetOriginalPermission() const { - return this->original_perm; + return m_original_perm; } constexpr KMemoryAttribute GetAttribute() const { - return this->attribute; + return m_attribute; } constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { - return this->disable_merge_attribute; + return m_disable_merge_attribute; } }; class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode { private: - u16 device_disable_merge_left_count; - u16 device_disable_merge_right_count; - KProcessAddress address; - size_t num_pages; - KMemoryState memory_state; - u16 ipc_lock_count; - u16 device_use_count; - u16 ipc_disable_merge_count; - KMemoryPermission perm; - KMemoryPermission original_perm; - KMemoryAttribute attribute; - KMemoryBlockDisableMergeAttribute disable_merge_attribute; + u16 m_device_disable_merge_left_count; + u16 m_device_disable_merge_right_count; + KProcessAddress m_address; + size_t m_num_pages; + KMemoryState m_memory_state; + u16 m_ipc_lock_count; + u16 m_device_use_count; + u16 m_ipc_disable_merge_count; + KMemoryPermission m_perm; + KMemoryPermission m_original_perm; + KMemoryAttribute m_attribute; + KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; public: static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) { if (lhs.GetAddress() < rhs.GetAddress()) { @@ -290,11 +290,11 @@ namespace ams::kern { } public: constexpr KProcessAddress GetAddress() const { - return this->address; + return m_address; } constexpr size_t GetNumPages() const { - return this->num_pages; + return m_num_pages; } constexpr size_t GetSize() const { @@ -310,87 +310,87 @@ namespace ams::kern { } constexpr u16 GetIpcLockCount() const { - return this->ipc_lock_count; + return m_ipc_lock_count; } constexpr u16 GetIpcDisableMergeCount() const { - return this->ipc_disable_merge_count; + return m_ipc_disable_merge_count; } constexpr KMemoryPermission GetPermission() const { - return this->perm; + return m_perm; } constexpr KMemoryPermission GetOriginalPermission() const { - return this->original_perm; + return m_original_perm; } constexpr KMemoryAttribute GetAttribute() const { - return this->attribute; + return m_attribute; } constexpr KMemoryInfo GetMemoryInfo() const { return { - .address = GetInteger(this->GetAddress()), - .size = this->GetSize(), - .state = this->memory_state, - .device_disable_merge_left_count = this->device_disable_merge_left_count, - .device_disable_merge_right_count = this->device_disable_merge_right_count, - .ipc_lock_count = 
this->ipc_lock_count, - .device_use_count = this->device_use_count, - .ipc_disable_merge_count = this->ipc_disable_merge_count, - .perm = this->perm, - .attribute = this->attribute, - .original_perm = this->original_perm, - .disable_merge_attribute = this->disable_merge_attribute, + .m_address = GetInteger(this->GetAddress()), + .m_size = this->GetSize(), + .m_state = m_memory_state, + .m_device_disable_merge_left_count = m_device_disable_merge_left_count, + .m_device_disable_merge_right_count = m_device_disable_merge_right_count, + .m_ipc_lock_count = m_ipc_lock_count, + .m_device_use_count = m_device_use_count, + .m_ipc_disable_merge_count = m_ipc_disable_merge_count, + .m_perm = m_perm, + .m_attribute = m_attribute, + .m_original_perm = m_original_perm, + .m_disable_merge_attribute = m_disable_merge_attribute, }; } public: constexpr KMemoryBlock() - : device_disable_merge_left_count(), device_disable_merge_right_count(), address(), num_pages(), memory_state(KMemoryState_None), ipc_lock_count(), device_use_count(), ipc_disable_merge_count(), perm(), original_perm(), attribute(), disable_merge_attribute() + : m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), m_address(), m_num_pages(), m_memory_state(KMemoryState_None), m_ipc_lock_count(), m_device_use_count(), m_ipc_disable_merge_count(), m_perm(), m_original_perm(), m_attribute(), m_disable_merge_attribute() { /* ... */ } constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) - : device_disable_merge_left_count(), device_disable_merge_right_count(), address(addr), num_pages(np), memory_state(ms), ipc_lock_count(0), device_use_count(0), ipc_disable_merge_count(), perm(p), original_perm(KMemoryPermission_None), attribute(attr), disable_merge_attribute() + : m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), m_device_use_count(0), m_ipc_disable_merge_count(), m_perm(p), m_original_perm(KMemoryPermission_None), m_attribute(attr), m_disable_merge_attribute() { /* ... 
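
The GetMemoryInfo() hunk above shows why this rename reaches beyond declarations: C++20 designated initializers name the members themselves, so every `.address = ...` must become `.m_address = ...` in lockstep. A tiny illustration (MemoryInfo here is a cut-down stand-in):

#include <cstddef>
#include <cstdint>

struct MemoryInfo {
    uintptr_t m_address;
    size_t    m_size;
};

constexpr MemoryInfo MakeInfo() {
    return {
        .m_address = 0x8000,  /* was ".address = ..." before the rename */
        .m_size    = 0x1000,
    };
}

static_assert(MakeInfo().m_address == 0x8000);

int main() { return 0; }
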
*/ } constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) { MESOSPHERE_ASSERT_THIS(); - this->address = addr; - this->num_pages = np; - this->memory_state = ms; - this->ipc_lock_count = 0; - this->device_use_count = 0; - this->perm = p; - this->original_perm = KMemoryPermission_None; - this->attribute = attr; + m_address = addr; + m_num_pages = np; + m_memory_state = ms; + m_ipc_lock_count = 0; + m_device_use_count = 0; + m_perm = p; + m_original_perm = KMemoryPermission_None; + m_attribute = attr; } constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { MESOSPHERE_ASSERT_THIS(); constexpr auto AttributeIgnoreMask = KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared; - return this->memory_state == s && this->perm == p && (this->attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + return m_memory_state == s && m_perm == p && (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); } constexpr bool HasSameProperties(const KMemoryBlock &rhs) const { MESOSPHERE_ASSERT_THIS(); - return this->memory_state == rhs.memory_state && - this->perm == rhs.perm && - this->original_perm == rhs.original_perm && - this->attribute == rhs.attribute && - this->ipc_lock_count == rhs.ipc_lock_count && - this->device_use_count == rhs.device_use_count; + return m_memory_state == rhs.m_memory_state && + m_perm == rhs.m_perm && + m_original_perm == rhs.m_original_perm && + m_attribute == rhs.m_attribute && + m_ipc_lock_count == rhs.m_ipc_lock_count && + m_device_use_count == rhs.m_device_use_count; } constexpr bool CanMergeWith(const KMemoryBlock &rhs) const { return this->HasSameProperties(rhs) && - (this->disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight) == 0 && - (rhs.disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft) == 0; + (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight) == 0 && + (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft) == 0; } constexpr bool Contains(KProcessAddress addr) const { @@ -404,25 +404,25 @@ namespace ams::kern { MESOSPHERE_ASSERT(added_block.GetNumPages() > 0); MESOSPHERE_ASSERT(this->GetAddress() + added_block.GetSize() - 1 < this->GetEndAddress() + added_block.GetSize() - 1); - this->num_pages += added_block.GetNumPages(); - this->disable_merge_attribute = static_cast(this->disable_merge_attribute | added_block.disable_merge_attribute); - this->device_disable_merge_right_count = added_block.device_disable_merge_right_count; + m_num_pages += added_block.GetNumPages(); + m_disable_merge_attribute = static_cast(m_disable_merge_attribute | added_block.m_disable_merge_attribute); + m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count; } constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a, bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) { MESOSPHERE_ASSERT_THIS(); - MESOSPHERE_ASSERT(this->original_perm == KMemoryPermission_None); - MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == 0); + MESOSPHERE_ASSERT(m_original_perm == KMemoryPermission_None); + MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == 0); - this->memory_state = s; - this->perm = p; - this->attribute = static_cast(a | (this->attribute & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared))); + m_memory_state = s; + m_perm = p; + m_attribute = static_cast(a | (m_attribute & (KMemoryAttribute_IpcLocked | 
KMemoryAttribute_DeviceShared))); if (set_disable_merge_attr && set_mask != 0) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute | set_mask); + m_disable_merge_attribute = static_cast(m_disable_merge_attribute | set_mask); } if (clear_mask != 0) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute & ~clear_mask); + m_disable_merge_attribute = static_cast(m_disable_merge_attribute & ~clear_mask); } } @@ -432,25 +432,25 @@ namespace ams::kern { MESOSPHERE_ASSERT(this->Contains(addr)); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize)); - block->address = this->address; - block->num_pages = (addr - this->GetAddress()) / PageSize; - block->memory_state = this->memory_state; - block->ipc_lock_count = this->ipc_lock_count; - block->device_use_count = this->device_use_count; - block->perm = this->perm; - block->original_perm = this->original_perm; - block->attribute = this->attribute; - block->disable_merge_attribute = static_cast(this->disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft); - block->ipc_disable_merge_count = this->ipc_disable_merge_count; - block->device_disable_merge_left_count = this->device_disable_merge_left_count; - block->device_disable_merge_right_count = 0; + block->m_address = m_address; + block->m_num_pages = (addr - this->GetAddress()) / PageSize; + block->m_memory_state = m_memory_state; + block->m_ipc_lock_count = m_ipc_lock_count; + block->m_device_use_count = m_device_use_count; + block->m_perm = m_perm; + block->m_original_perm = m_original_perm; + block->m_attribute = m_attribute; + block->m_disable_merge_attribute = static_cast(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft); + block->m_ipc_disable_merge_count = m_ipc_disable_merge_count; + block->m_device_disable_merge_left_count = m_device_disable_merge_left_count; + block->m_device_disable_merge_right_count = 0; - this->address = addr; - this->num_pages -= block->num_pages; + m_address = addr; + m_num_pages -= block->m_num_pages; - this->ipc_disable_merge_count = 0; - this->device_disable_merge_left_count = 0; - this->disable_merge_attribute = static_cast(this->disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight); + m_ipc_disable_merge_count = 0; + m_device_disable_merge_left_count = 0; + m_disable_merge_attribute = static_cast(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight); } constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left, bool right) { @@ -458,8 +458,8 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm, right); if (left) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceLeft); - const u16 new_device_disable_merge_left_count = ++this->device_disable_merge_left_count; + m_disable_merge_attribute = static_cast(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceLeft); + const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count; MESOSPHERE_ABORT_UNLESS(new_device_disable_merge_left_count > 0); } } @@ -469,8 +469,8 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm, left); if (right) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceRight); - const u16 new_device_disable_merge_right_count = ++this->device_disable_merge_right_count; + m_disable_merge_attribute = static_cast(m_disable_merge_attribute | 
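
Split() above carves the front of a block off at a page-aligned address: the new block receives (addr - base) / PageSize pages together with the left-side disable-merge state, while the original keeps the remainder. A standalone sketch of just the arithmetic (Block and SplitFront are illustrative names):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t PageSize = 0x1000;

struct Block {
    uintptr_t address;
    size_t    num_pages;
};

/* Carve [block.address, split_addr) off the front; block keeps the rest. */
Block SplitFront(Block &block, uintptr_t split_addr) {
    Block front{block.address, (split_addr - block.address) / PageSize};
    block.address    = split_addr;
    block.num_pages -= front.num_pages;
    return front;
}

int main() {
    Block b{0x10000, 8};                       /* covers [0x10000, 0x18000) */
    const Block front = SplitFront(b, 0x13000);
    assert(front.num_pages == 3 && b.address == 0x13000 && b.num_pages == 5);
    return 0;
}
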
KMemoryBlockDisableMergeAttribute_DeviceRight); + const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count; MESOSPHERE_ABORT_UNLESS(new_device_disable_merge_right_count > 0); } } @@ -485,13 +485,13 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm); /* We must either be shared or have a zero lock count. */ - MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared || this->device_use_count == 0); + MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared || m_device_use_count == 0); /* Share. */ - const u16 new_count = ++this->device_use_count; + const u16 new_count = ++m_device_use_count; MESOSPHERE_ABORT_UNLESS(new_count > 0); - this->attribute = static_cast(this->attribute | KMemoryAttribute_DeviceShared); + m_attribute = static_cast(m_attribute | KMemoryAttribute_DeviceShared); this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right); } @@ -501,16 +501,16 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm, right); if (left) { - if (!this->device_disable_merge_left_count) { + if (!m_device_disable_merge_left_count) { return; } - --this->device_disable_merge_left_count; + --m_device_disable_merge_left_count; } - this->device_disable_merge_left_count = std::min(this->device_disable_merge_left_count, this->device_use_count); + m_device_disable_merge_left_count = std::min(m_device_disable_merge_left_count, m_device_use_count); - if (this->device_disable_merge_left_count == 0) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceLeft); + if (m_device_disable_merge_left_count == 0) { + m_disable_merge_attribute = static_cast(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceLeft); } } @@ -519,10 +519,10 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm, left); if (right) { - const u16 old_device_disable_merge_right_count = this->device_disable_merge_right_count--; + const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; MESOSPHERE_ASSERT(old_device_disable_merge_right_count > 0); if (old_device_disable_merge_right_count == 1) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceRight); + m_disable_merge_attribute = static_cast(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceRight); } } } @@ -537,14 +537,14 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm); /* We must be shared. */ - MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared); + MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared); /* Unhare. */ - const u16 old_count = this->device_use_count--; + const u16 old_count = m_device_use_count--; MESOSPHERE_ABORT_UNLESS(old_count > 0); if (old_count == 1) { - this->attribute = static_cast(this->attribute & ~KMemoryAttribute_DeviceShared); + m_attribute = static_cast(m_attribute & ~KMemoryAttribute_DeviceShared); } this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right); @@ -555,14 +555,14 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm); /* We must be shared. */ - MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared); + MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared); /* Unhare. 
*/ - const u16 old_count = this->device_use_count--; + const u16 old_count = m_device_use_count--; MESOSPHERE_ABORT_UNLESS(old_count > 0); if (old_count == 1) { - this->attribute = static_cast(this->attribute & ~KMemoryAttribute_DeviceShared); + m_attribute = static_cast(m_attribute & ~KMemoryAttribute_DeviceShared); } this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); @@ -570,25 +570,25 @@ namespace ams::kern { constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) { /* We must either be locked or have a zero lock count. */ - MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked || this->ipc_lock_count == 0); + MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked || m_ipc_lock_count == 0); /* Lock. */ - const u16 new_lock_count = ++this->ipc_lock_count; + const u16 new_lock_count = ++m_ipc_lock_count; MESOSPHERE_ABORT_UNLESS(new_lock_count > 0); /* If this is our first lock, update our permissions. */ if (new_lock_count == 1) { - MESOSPHERE_ASSERT(this->original_perm == KMemoryPermission_None); - MESOSPHERE_ASSERT((this->perm | new_perm | KMemoryPermission_NotMapped) == (this->perm | KMemoryPermission_NotMapped)); - MESOSPHERE_ASSERT((this->perm & KMemoryPermission_UserExecute) != KMemoryPermission_UserExecute || (new_perm == KMemoryPermission_UserRead)); - this->original_perm = this->perm; - this->perm = static_cast((new_perm & KMemoryPermission_IpcLockChangeMask) | (this->original_perm & ~KMemoryPermission_IpcLockChangeMask)); + MESOSPHERE_ASSERT(m_original_perm == KMemoryPermission_None); + MESOSPHERE_ASSERT((m_perm | new_perm | KMemoryPermission_NotMapped) == (m_perm | KMemoryPermission_NotMapped)); + MESOSPHERE_ASSERT((m_perm & KMemoryPermission_UserExecute) != KMemoryPermission_UserExecute || (new_perm == KMemoryPermission_UserRead)); + m_original_perm = m_perm; + m_perm = static_cast((new_perm & KMemoryPermission_IpcLockChangeMask) | (m_original_perm & ~KMemoryPermission_IpcLockChangeMask)); } - this->attribute = static_cast(this->attribute | KMemoryAttribute_IpcLocked); + m_attribute = static_cast(m_attribute | KMemoryAttribute_IpcLocked); if (left) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute | KMemoryBlockDisableMergeAttribute_IpcLeft); - const u16 new_ipc_disable_merge_count = ++this->ipc_disable_merge_count; + m_disable_merge_attribute = static_cast(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_IpcLeft); + const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count; MESOSPHERE_ABORT_UNLESS(new_ipc_disable_merge_count > 0); } MESOSPHERE_UNUSED(right); @@ -599,32 +599,32 @@ namespace ams::kern { MESOSPHERE_UNUSED(new_perm); /* We must be locked. */ - MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked); + MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked); /* Unlock. */ - const u16 old_lock_count = this->ipc_lock_count--; + const u16 old_lock_count = m_ipc_lock_count--; MESOSPHERE_ABORT_UNLESS(old_lock_count > 0); /* If this is our last unlock, update our permissions. 
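
LockForIpc() above saves the permission into original_perm on the first lock and overlays only the bits in KMemoryPermission_IpcLockChangeMask; the last UnlockForIpc() restores the original. A standalone sketch of the overlay (the mask value is illustrative, not the kernel's):

#include <cassert>
#include <cstdint>

constexpr uint8_t IpcLockChangeMask = 0x0F;  /* illustrative mask */

constexpr uint8_t OverlayPerm(uint8_t original, uint8_t new_perm) {
    /* Take only the masked bits from new_perm; keep the rest of original. */
    return static_cast<uint8_t>((new_perm & IpcLockChangeMask) | (original & ~IpcLockChangeMask));
}

int main() {
    const uint8_t original = 0xF3;            /* saved on the first lock */
    const uint8_t locked   = OverlayPerm(original, 0x01);
    assert(locked == 0xF1);                   /* low bits replaced, high bits kept */
    /* The last unlock simply restores original (perm = original_perm). */
    return 0;
}
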
*/ if (old_lock_count == 1) { - MESOSPHERE_ASSERT(this->original_perm != KMemoryPermission_None); - this->perm = this->original_perm; - this->original_perm = KMemoryPermission_None; - this->attribute = static_cast(this->attribute & ~KMemoryAttribute_IpcLocked); + MESOSPHERE_ASSERT(m_original_perm != KMemoryPermission_None); + m_perm = m_original_perm; + m_original_perm = KMemoryPermission_None; + m_attribute = static_cast(m_attribute & ~KMemoryAttribute_IpcLocked); } if (left) { - const u16 old_ipc_disable_merge_count = this->ipc_disable_merge_count--; + const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--; MESOSPHERE_ASSERT(old_ipc_disable_merge_count > 0); if (old_ipc_disable_merge_count == 1) { - this->disable_merge_attribute = static_cast(this->disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_IpcLeft); + m_disable_merge_attribute = static_cast(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_IpcLeft); } } MESOSPHERE_UNUSED(right); } constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { - return this->disable_merge_attribute; + return m_disable_merge_attribute; } }; static_assert(std::is_trivially_destructible::value); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp index 54dfcdd71..605711891 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp @@ -24,16 +24,16 @@ namespace ams::kern { public: static constexpr size_t MaxBlocks = 2; private: - KMemoryBlock *blocks[MaxBlocks]; - size_t index; - KMemoryBlockSlabManager *slab_manager; + KMemoryBlock *m_blocks[MaxBlocks]; + size_t m_index; + KMemoryBlockSlabManager *m_slab_manager; public: - constexpr explicit KMemoryBlockManagerUpdateAllocator(KMemoryBlockSlabManager *sm) : blocks(), index(MaxBlocks), slab_manager(sm) { /* ... */ } + constexpr explicit KMemoryBlockManagerUpdateAllocator(KMemoryBlockSlabManager *sm) : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) { /* ... */ } ~KMemoryBlockManagerUpdateAllocator() { - for (const auto &block : this->blocks) { + for (const auto &block : m_blocks) { if (block != nullptr) { - this->slab_manager->Free(block); + m_slab_manager->Free(block); } } } @@ -43,32 +43,32 @@ namespace ams::kern { MESOSPHERE_ASSERT(num_blocks <= MaxBlocks); /* Set index. */ - this->index = MaxBlocks - num_blocks; + m_index = MaxBlocks - num_blocks; /* Allocate the blocks. 
*/ for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) { - this->blocks[this->index + i] = this->slab_manager->Allocate(); - R_UNLESS(this->blocks[this->index + i] != nullptr, svc::ResultOutOfResource()); + m_blocks[m_index + i] = m_slab_manager->Allocate(); + R_UNLESS(m_blocks[m_index + i] != nullptr, svc::ResultOutOfResource()); } return ResultSuccess(); } KMemoryBlock *Allocate() { - MESOSPHERE_ABORT_UNLESS(this->index < MaxBlocks); - MESOSPHERE_ABORT_UNLESS(this->blocks[this->index] != nullptr); + MESOSPHERE_ABORT_UNLESS(m_index < MaxBlocks); + MESOSPHERE_ABORT_UNLESS(m_blocks[m_index] != nullptr); KMemoryBlock *block = nullptr; - std::swap(block, this->blocks[this->index++]); + std::swap(block, m_blocks[m_index++]); return block; } void Free(KMemoryBlock *block) { - MESOSPHERE_ABORT_UNLESS(this->index <= MaxBlocks); + MESOSPHERE_ABORT_UNLESS(m_index <= MaxBlocks); MESOSPHERE_ABORT_UNLESS(block != nullptr); - if (this->index == 0) { - this->slab_manager->Free(block); + if (m_index == 0) { + m_slab_manager->Free(block); } else { - this->blocks[--this->index] = block; + m_blocks[--m_index] = block; } } }; @@ -80,17 +80,17 @@ namespace ams::kern { using iterator = MemoryBlockTree::iterator; using const_iterator = MemoryBlockTree::const_iterator; private: - MemoryBlockTree memory_block_tree; - KProcessAddress start_address; - KProcessAddress end_address; + MemoryBlockTree m_memory_block_tree; + KProcessAddress m_start_address; + KProcessAddress m_end_address; private: void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages); public: - constexpr KMemoryBlockManager() : memory_block_tree(), start_address(), end_address() { /* ... */ } + constexpr KMemoryBlockManager() : m_memory_block_tree(), m_start_address(), m_end_address() { /* ... 
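
KMemoryBlockManagerUpdateAllocator above reserves the worst-case number of blocks (MaxBlocks) before a tree update begins, so the update itself cannot fail part-way through. A hedged sketch of the pattern, with plain new/delete standing in for the slab manager and the index==0 direct-to-slab free case simplified away:

#include <cassert>
#include <cstddef>
#include <new>
#include <utility>

template<typename T, size_t MaxBlocks = 2>
class UpdateAllocator {
    T *blocks[MaxBlocks] = {};
    size_t index = MaxBlocks;
public:
    ~UpdateAllocator() {
        for (T *b : blocks) { delete b; }   /* return unused reservations */
    }
    bool Initialize(size_t num_blocks) {
        index = MaxBlocks - num_blocks;     /* reserve from the back of the array */
        for (size_t i = 0; i < num_blocks; ++i) {
            if ((blocks[index + i] = new (std::nothrow) T()) == nullptr) { return false; }
        }
        return true;
    }
    T *Allocate() {                         /* hand out the next reserved block */
        return std::exchange(blocks[index++], nullptr);
    }
    void Free(T *b) {                       /* push an unneeded block back */
        blocks[--index] = b;
    }
};

int main() {
    UpdateAllocator<int> alloc;
    assert(alloc.Initialize(2));
    int *a = alloc.Allocate();
    alloc.Free(a);                          /* reclaimed by ~UpdateAllocator */
    return 0;
}
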
*/ } - iterator end() { return this->memory_block_tree.end(); } - const_iterator end() const { return this->memory_block_tree.end(); } - const_iterator cend() const { return this->memory_block_tree.cend(); } + iterator end() { return m_memory_block_tree.end(); } + const_iterator end() const { return m_memory_block_tree.end(); } + const_iterator cend() const { return m_memory_block_tree.cend(); } Result Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager); void Finalize(KMemoryBlockSlabManager *slab_manager); @@ -103,11 +103,11 @@ namespace ams::kern { void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr); iterator FindIterator(KProcessAddress address) const { - return this->memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None)); + return m_memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None)); } const KMemoryBlock *FindBlock(KProcessAddress address) const { - if (const_iterator it = this->FindIterator(address); it != this->memory_block_tree.end()) { + if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) { return std::addressof(*it); } @@ -121,11 +121,11 @@ namespace ams::kern { class KScopedMemoryBlockManagerAuditor { private: - KMemoryBlockManager *manager; + KMemoryBlockManager *m_manager; public: - explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager *m) : manager(m) { MESOSPHERE_AUDIT(this->manager->CheckState()); } + explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager *m) : m_manager(m) { MESOSPHERE_AUDIT(m_manager->CheckState()); } explicit ALWAYS_INLINE KScopedMemoryBlockManagerAuditor(KMemoryBlockManager &m) : KScopedMemoryBlockManagerAuditor(std::addressof(m)) { /* ... */ } - ALWAYS_INLINE ~KScopedMemoryBlockManagerAuditor() { MESOSPHERE_AUDIT(this->manager->CheckState()); } + ALWAYS_INLINE ~KScopedMemoryBlockManagerAuditor() { MESOSPHERE_AUDIT(m_manager->CheckState()); } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp index 30c740eab..5bef180a9 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -61,50 +61,50 @@ namespace ams::kern { return (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64); } private: - KPageHeap heap; - RefCount *page_reference_counts; - KVirtualAddress management_region; - Pool pool; - Impl *next; - Impl *prev; + KPageHeap m_heap; + RefCount *m_page_reference_counts; + KVirtualAddress m_management_region; + Pool m_pool; + Impl *m_next; + Impl *m_prev; public: - Impl() : heap(), page_reference_counts(), management_region(), pool(), next(), prev() { /* ... */ } + Impl() : m_heap(), m_page_reference_counts(), m_management_region(), m_pool(), m_next(), m_prev() { /* ... 
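
FindIterator() above searches the address-ordered red-black tree by constructing a throwaway one-page KMemoryBlock to serve as the comparison key. A minimal analogue with std::set; note this analogue is exact-match only, whereas the kernel's Compare locates the block containing the address:

#include <cassert>
#include <cstdint>
#include <set>

struct Block {
    uintptr_t address;
    bool operator<(const Block &rhs) const { return address < rhs.address; }
};

int main() {
    std::set<Block> tree{{0x1000}, {0x3000}, {0x5000}};
    /* The probe object exists only to carry the search key. */
    auto it = tree.find(Block{0x3000});
    assert(it != tree.end() && it->address == 0x3000);
    return 0;
}
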
*/ } size_t Initialize(uintptr_t address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p); - KVirtualAddress AllocateBlock(s32 index, bool random) { return this->heap.AllocateBlock(index, random); } - void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); } + KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); } + void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); } - void UpdateUsedHeapSize() { this->heap.UpdateUsedSize(); } + void UpdateUsedHeapSize() { m_heap.UpdateUsedSize(); } - void InitializeOptimizedMemory() { std::memset(GetVoidPointer(this->management_region), 0, CalculateOptimizedProcessOverheadSize(this->heap.GetSize())); } + void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); } void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages); void TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages); bool ProcessOptimizedAllocation(KVirtualAddress block, size_t num_pages, u8 fill_pattern); - constexpr Pool GetPool() const { return this->pool; } - constexpr size_t GetSize() const { return this->heap.GetSize(); } - constexpr KVirtualAddress GetEndAddress() const { return this->heap.GetEndAddress(); } + constexpr Pool GetPool() const { return m_pool; } + constexpr size_t GetSize() const { return m_heap.GetSize(); } + constexpr KVirtualAddress GetEndAddress() const { return m_heap.GetEndAddress(); } - size_t GetFreeSize() const { return this->heap.GetFreeSize(); } + size_t GetFreeSize() const { return m_heap.GetFreeSize(); } - void DumpFreeList() const { return this->heap.DumpFreeList(); } + void DumpFreeList() const { return m_heap.DumpFreeList(); } - constexpr size_t GetPageOffset(KVirtualAddress address) const { return this->heap.GetPageOffset(address); } - constexpr size_t GetPageOffsetToEnd(KVirtualAddress address) const { return this->heap.GetPageOffsetToEnd(address); } + constexpr size_t GetPageOffset(KVirtualAddress address) const { return m_heap.GetPageOffset(address); } + constexpr size_t GetPageOffsetToEnd(KVirtualAddress address) const { return m_heap.GetPageOffsetToEnd(address); } - constexpr void SetNext(Impl *n) { this->next = n; } - constexpr void SetPrev(Impl *n) { this->prev = n; } - constexpr Impl *GetNext() const { return this->next; } - constexpr Impl *GetPrev() const { return this->prev; } + constexpr void SetNext(Impl *n) { m_next = n; } + constexpr void SetPrev(Impl *n) { m_prev = n; } + constexpr Impl *GetNext() const { return m_next; } + constexpr Impl *GetPrev() const { return m_prev; } void OpenFirst(KVirtualAddress address, size_t num_pages) { size_t index = this->GetPageOffset(address); const size_t end = index + num_pages; while (index < end) { - const RefCount ref_count = (++this->page_reference_counts[index]); + const RefCount ref_count = (++m_page_reference_counts[index]); MESOSPHERE_ABORT_UNLESS(ref_count == 1); index++; @@ -115,7 +115,7 @@ namespace ams::kern { size_t index = this->GetPageOffset(address); const size_t end = index + num_pages; while (index < end) { - const RefCount ref_count = (++this->page_reference_counts[index]); + const RefCount ref_count = (++m_page_reference_counts[index]); MESOSPHERE_ABORT_UNLESS(ref_count > 1); index++; @@ -129,8 +129,8 @@ namespace ams::kern { size_t free_start = 0; size_t free_count = 0; while (index < end) { - 
MESOSPHERE_ABORT_UNLESS(this->page_reference_counts[index] > 0); - const RefCount ref_count = (--this->page_reference_counts[index]); + MESOSPHERE_ABORT_UNLESS(m_page_reference_counts[index] > 0); + const RefCount ref_count = (--m_page_reference_counts[index]); /* Keep track of how many zero refcounts we see in a row, to minimize calls to free. */ if (ref_count == 0) { @@ -142,7 +142,7 @@ namespace ams::kern { } } else { if (free_count > 0) { - this->Free(this->heap.GetAddress() + free_start * PageSize, free_count); + this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); free_count = 0; } } @@ -151,25 +151,25 @@ namespace ams::kern { } if (free_count > 0) { - this->Free(this->heap.GetAddress() + free_start * PageSize, free_count); + this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); } } }; private: - KLightLock pool_locks[Pool_Count]; - Impl *pool_managers_head[Pool_Count]; - Impl *pool_managers_tail[Pool_Count]; - Impl managers[MaxManagerCount]; - size_t num_managers; - u64 optimized_process_ids[Pool_Count]; - bool has_optimized_process[Pool_Count]; + KLightLock m_pool_locks[Pool_Count]; + Impl *m_pool_managers_head[Pool_Count]; + Impl *m_pool_managers_tail[Pool_Count]; + Impl m_managers[MaxManagerCount]; + size_t m_num_managers; + u64 m_optimized_process_ids[Pool_Count]; + bool m_has_optimized_process[Pool_Count]; private: Impl &GetManager(KVirtualAddress address) { - return this->managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()]; + return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()]; } constexpr Impl *GetFirstManager(Pool pool, Direction dir) { - return dir == Direction_FromBack ? this->pool_managers_tail[pool] : this->pool_managers_head[pool]; + return dir == Direction_FromBack ? m_pool_managers_tail[pool] : m_pool_managers_head[pool]; } constexpr Impl *GetNextManager(Impl *cur, Direction dir) { @@ -183,7 +183,7 @@ namespace ams::kern { Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random); public: KMemoryManager() - : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process() + : m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process() { /* ... 
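
Impl::Close() above decrements per-page reference counts and batches contiguous runs that reach zero into single Free() calls. A standalone sketch of that run-coalescing:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

void CloseRange(std::vector<int> &refcounts, size_t first, size_t count) {
    size_t free_start = 0, free_count = 0;
    auto flush = [&] {
        if (free_count > 0) {
            std::printf("Free(pages %zu..%zu)\n", free_start, free_start + free_count - 1);
            free_count = 0;
        }
    };
    for (size_t i = first; i < first + count; ++i) {
        assert(refcounts[i] > 0);
        if (--refcounts[i] == 0) {
            if (free_count == 0) { free_start = i; }   /* start a new run */
            ++free_count;
        } else {
            flush();                                   /* run broken by a still-live page */
        }
    }
    flush();                                           /* flush any trailing run */
}

int main() {
    std::vector<int> refs{1, 1, 2, 1};
    CloseRange(refs, 0, 4);   /* prints Free(pages 0..1) and Free(pages 3..3) */
    return 0;
}
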
*/ } @@ -204,7 +204,7 @@ namespace ams::kern { const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); { - KScopedLightLock lk(this->pool_locks[manager.GetPool()]); + KScopedLightLock lk(m_pool_locks[manager.GetPool()]); manager.Open(address, cur_pages); } @@ -220,7 +220,7 @@ namespace ams::kern { const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); { - KScopedLightLock lk(this->pool_locks[manager.GetPool()]); + KScopedLightLock lk(m_pool_locks[manager.GetPool()]); manager.Close(address, cur_pages); } @@ -231,8 +231,8 @@ namespace ams::kern { size_t GetSize() { size_t total = 0; - for (size_t i = 0; i < this->num_managers; i++) { - total += this->managers[i].GetSize(); + for (size_t i = 0; i < m_num_managers; i++) { + total += m_managers[i].GetSize(); } return total; } @@ -248,15 +248,15 @@ namespace ams::kern { size_t GetFreeSize() { size_t total = 0; - for (size_t i = 0; i < this->num_managers; i++) { - KScopedLightLock lk(this->pool_locks[this->managers[i].GetPool()]); - total += this->managers[i].GetFreeSize(); + for (size_t i = 0; i < m_num_managers; i++) { + KScopedLightLock lk(m_pool_locks[m_managers[i].GetPool()]); + total += m_managers[i].GetFreeSize(); } return total; } size_t GetFreeSize(Pool pool) { - KScopedLightLock lk(this->pool_locks[pool]); + KScopedLightLock lk(m_pool_locks[pool]); constexpr Direction GetSizeDirection = Direction_FromFront; size_t total = 0; @@ -267,7 +267,7 @@ namespace ams::kern { } void DumpFreeList(Pool pool) { - KScopedLightLock lk(this->pool_locks[pool]); + KScopedLightLock lk(m_pool_locks[pool]); constexpr Direction DumpDirection = Direction_FromFront; for (auto *manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr; manager = this->GetNextManager(manager, DumpDirection)) { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp index af88139e7..f31716e82 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp @@ -27,11 +27,11 @@ namespace ams::kern { private: friend class KMemoryRegionTree; private: - uintptr_t address; - uintptr_t pair_address; - uintptr_t last_address; - u32 attributes; - u32 type_id; + uintptr_t m_address; + uintptr_t m_pair_address; + uintptr_t m_last_address; + u32 m_attributes; + u32 m_type_id; public: static constexpr ALWAYS_INLINE int Compare(const KMemoryRegion &lhs, const KMemoryRegion &rhs) { if (lhs.GetAddress() < rhs.GetAddress()) { @@ -43,32 +43,32 @@ namespace ams::kern { } } public: - constexpr ALWAYS_INLINE KMemoryRegion() : address(0), pair_address(0), last_address(0), attributes(0), type_id(0) { /* ... */ } + constexpr ALWAYS_INLINE KMemoryRegion() : m_address(0), m_pair_address(0), m_last_address(0), m_attributes(0), m_type_id(0) { /* ... */ } constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t la, uintptr_t p, u32 r, u32 t) : - address(a), pair_address(p), last_address(la), attributes(r), type_id(t) + m_address(a), m_pair_address(p), m_last_address(la), m_attributes(r), m_type_id(t) { /* ... */ } constexpr ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t la, u32 r, u32 t) : KMemoryRegion(a, la, std::numeric_limits::max(), r, t) { /* ... 
*/ } private: constexpr ALWAYS_INLINE void Reset(uintptr_t a, uintptr_t la, uintptr_t p, u32 r, u32 t) { - this->address = a; - this->pair_address = p; - this->last_address = la; - this->attributes = r; - this->type_id = t; + m_address = a; + m_pair_address = p; + m_last_address = la; + m_attributes = r; + m_type_id = t; } public: constexpr ALWAYS_INLINE uintptr_t GetAddress() const { - return this->address; + return m_address; } constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const { - return this->pair_address; + return m_pair_address; } constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const { - return this->last_address; + return m_last_address; } constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const { @@ -80,16 +80,16 @@ namespace ams::kern { } constexpr ALWAYS_INLINE u32 GetAttributes() const { - return this->attributes; + return m_attributes; } constexpr ALWAYS_INLINE u32 GetType() const { - return this->type_id; + return m_type_id; } constexpr ALWAYS_INLINE void SetType(u32 type) { MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type)); - this->type_id = type; + m_type_id = type; } constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const { @@ -110,11 +110,11 @@ namespace ams::kern { } constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) { - this->pair_address = a; + m_pair_address = a; } constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionAttr attr) { - this->type_id |= attr; + m_type_id |= attr; } }; static_assert(std::is_trivially_destructible::value); @@ -156,9 +156,9 @@ namespace ams::kern { using iterator = TreeType::iterator; using const_iterator = TreeType::const_iterator; private: - TreeType tree; + TreeType m_tree; public: - constexpr ALWAYS_INLINE KMemoryRegionTree() : tree() { /* ... */ } + constexpr ALWAYS_INLINE KMemoryRegionTree() : m_tree() { /* ... */ } public: KMemoryRegion *FindModifiable(uintptr_t address) { if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) { @@ -246,19 +246,19 @@ namespace ams::kern { public: /* Iterator accessors. */ iterator begin() { - return this->tree.begin(); + return m_tree.begin(); } const_iterator begin() const { - return this->tree.begin(); + return m_tree.begin(); } iterator end() { - return this->tree.end(); + return m_tree.end(); } const_iterator end() const { - return this->tree.end(); + return m_tree.end(); } const_iterator cbegin() const { @@ -270,49 +270,49 @@ namespace ams::kern { } iterator iterator_to(reference ref) { - return this->tree.iterator_to(ref); + return m_tree.iterator_to(ref); } const_iterator iterator_to(const_reference ref) const { - return this->tree.iterator_to(ref); + return m_tree.iterator_to(ref); } /* Content management. */ bool empty() const { - return this->tree.empty(); + return m_tree.empty(); } reference back() { - return this->tree.back(); + return m_tree.back(); } const_reference back() const { - return this->tree.back(); + return m_tree.back(); } reference front() { - return this->tree.front(); + return m_tree.front(); } const_reference front() const { - return this->tree.front(); + return m_tree.front(); } /* GCC over-eagerly inlines this operation. 
*/ NOINLINE iterator insert(reference ref) { - return this->tree.insert(ref); + return m_tree.insert(ref); } NOINLINE iterator erase(iterator it) { - return this->tree.erase(it); + return m_tree.erase(it); } iterator find(const_reference ref) const { - return this->tree.find(ref); + return m_tree.find(ref); } iterator nfind(const_reference ref) const { - return this->tree.nfind(ref); + return m_tree.nfind(ref); } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp index e0bfa7255..06fd11d28 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp @@ -52,73 +52,73 @@ namespace ams::kern { private: using ValueType = typename std::underlying_type::type; private: - ValueType value; - size_t next_bit; - bool finalized; - bool sparse_only; - bool dense_only; + ValueType m_value; + size_t m_next_bit; + bool m_finalized; + bool m_sparse_only; + bool m_dense_only; private: - consteval KMemoryRegionTypeValue(ValueType v) : value(v), next_bit(0), finalized(false), sparse_only(false), dense_only(false) { /* ... */ } + consteval KMemoryRegionTypeValue(ValueType v) : m_value(v), m_next_bit(0), m_finalized(false), m_sparse_only(false), m_dense_only(false) { /* ... */ } public: consteval KMemoryRegionTypeValue() : KMemoryRegionTypeValue(0) { /* ... */ } - consteval operator KMemoryRegionType() const { return static_cast(this->value); } - consteval ValueType GetValue() const { return this->value; } + consteval operator KMemoryRegionType() const { return static_cast(m_value); } + consteval ValueType GetValue() const { return m_value; } - consteval const KMemoryRegionTypeValue &Finalize() { this->finalized = true; return *this; } - consteval const KMemoryRegionTypeValue &SetSparseOnly() { this->sparse_only = true; return *this; } - consteval const KMemoryRegionTypeValue &SetDenseOnly() { this->dense_only = true; return *this; } + consteval const KMemoryRegionTypeValue &Finalize() { m_finalized = true; return *this; } + consteval const KMemoryRegionTypeValue &SetSparseOnly() { m_sparse_only = true; return *this; } + consteval const KMemoryRegionTypeValue &SetDenseOnly() { m_dense_only = true; return *this; } - consteval KMemoryRegionTypeValue &SetAttribute(KMemoryRegionAttr attr) { AMS_ASSUME(!this->finalized); this->value |= attr; return *this; } + consteval KMemoryRegionTypeValue &SetAttribute(KMemoryRegionAttr attr) { AMS_ASSUME(!m_finalized); m_value |= attr; return *this; } consteval KMemoryRegionTypeValue DeriveInitial(size_t i, size_t next = BITSIZEOF(ValueType)) const { - AMS_ASSUME(!this->finalized); - AMS_ASSUME(!this->value); - AMS_ASSUME(!this->next_bit); + AMS_ASSUME(!m_finalized); + AMS_ASSUME(!m_value); + AMS_ASSUME(!m_next_bit); AMS_ASSUME(next > i); KMemoryRegionTypeValue new_type = *this; - new_type.value = (ValueType{1} << i); - new_type.next_bit = next; + new_type.m_value = (ValueType{1} << i); + new_type.m_next_bit = next; return new_type; } consteval KMemoryRegionTypeValue DeriveAttribute(KMemoryRegionAttr attr) const { - AMS_ASSUME(!this->finalized); + AMS_ASSUME(!m_finalized); KMemoryRegionTypeValue new_type = *this; - new_type.value |= attr; + new_type.m_value |= attr; return new_type; } consteval KMemoryRegionTypeValue DeriveTransition(size_t ofs = 0, size_t adv = 1) const { - AMS_ASSUME(!this->finalized); + AMS_ASSUME(!m_finalized); AMS_ASSUME(ofs < adv); - 
AMS_ASSUME(this->next_bit + adv <= BITSIZEOF(ValueType)); + AMS_ASSUME(m_next_bit + adv <= BITSIZEOF(ValueType)); KMemoryRegionTypeValue new_type = *this; - new_type.value |= (ValueType{1} << (this->next_bit + ofs)); - new_type.next_bit += adv; + new_type.m_value |= (ValueType{1} << (m_next_bit + ofs)); + new_type.m_next_bit += adv; return new_type; } consteval KMemoryRegionTypeValue DeriveSparse(size_t ofs, size_t n, size_t i) const { - AMS_ASSUME(!this->finalized); - AMS_ASSUME(!this->dense_only); - AMS_ASSUME(this->next_bit + ofs + n + 1 <= BITSIZEOF(ValueType)); + AMS_ASSUME(!m_finalized); + AMS_ASSUME(!m_dense_only); + AMS_ASSUME(m_next_bit + ofs + n + 1 <= BITSIZEOF(ValueType)); AMS_ASSUME(i < n); KMemoryRegionTypeValue new_type = *this; - new_type.value |= (ValueType{1} << (this->next_bit + ofs)); - new_type.value |= (ValueType{1} << (this->next_bit + ofs + 1 + i)); - new_type.next_bit += ofs + n + 1; + new_type.m_value |= (ValueType{1} << (m_next_bit + ofs)); + new_type.m_value |= (ValueType{1} << (m_next_bit + ofs + 1 + i)); + new_type.m_next_bit += ofs + n + 1; return new_type; } consteval KMemoryRegionTypeValue Derive(size_t n, size_t i) const { - AMS_ASSUME(!this->finalized); - AMS_ASSUME(!this->sparse_only); - AMS_ASSUME(this->next_bit + BitsForDeriveDense(n) <= BITSIZEOF(ValueType)); + AMS_ASSUME(!m_finalized); + AMS_ASSUME(!m_sparse_only); + AMS_ASSUME(m_next_bit + BitsForDeriveDense(n) <= BITSIZEOF(ValueType)); AMS_ASSUME(i < n); size_t low = 0, high = 1; @@ -132,23 +132,23 @@ namespace ams::kern { KMemoryRegionTypeValue new_type = *this; - new_type.value |= (ValueType{1} << (this->next_bit + low)); - new_type.value |= (ValueType{1} << (this->next_bit + high)); - new_type.next_bit += BitsForDeriveDense(n); + new_type.m_value |= (ValueType{1} << (m_next_bit + low)); + new_type.m_value |= (ValueType{1} << (m_next_bit + high)); + new_type.m_next_bit += BitsForDeriveDense(n); return new_type; } consteval KMemoryRegionTypeValue Advance(size_t n) const { - AMS_ASSUME(!this->finalized); - AMS_ASSUME(this->next_bit + n <= BITSIZEOF(ValueType)); + AMS_ASSUME(!m_finalized); + AMS_ASSUME(m_next_bit + n <= BITSIZEOF(ValueType)); KMemoryRegionTypeValue new_type = *this; - new_type.next_bit += n; + new_type.m_next_bit += n; return new_type; } constexpr ALWAYS_INLINE bool IsAncestorOf(ValueType v) const { - return (this->value | v) == v; + return (m_value | v) == v; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp index b57dd233c..f97a16d4b 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp @@ -27,10 +27,10 @@ namespace ams::kern { using List = util::IntrusiveListBaseTraits<KObjectName>::ListType; private: - char name[NameLengthMax]; - KAutoObject *object; + char m_name[NameLengthMax]; + KAutoObject *m_object; public: - constexpr KObjectName() : name(), object() { /* ... */ } + constexpr KObjectName() : m_name(), m_object() { /* ...
*/ } public: static Result NewFromName(KAutoObject *obj, const char *name); static Result Delete(KAutoObject *obj, const char *name); @@ -60,7 +60,7 @@ namespace ams::kern { void Initialize(KAutoObject *obj, const char *name); bool MatchesName(const char *name) const; - KAutoObject *GetObject() const { return this->object; } + KAutoObject *GetObject() const { return m_object; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp index 33d21334c..05780f0cc 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp @@ -23,28 +23,28 @@ namespace ams::kern { private: class RandomBitGenerator { private: - util::TinyMT rng; - u32 entropy; - u32 bits_available; + util::TinyMT m_rng; + u32 m_entropy; + u32 m_bits_available; private: void RefreshEntropy() { - this->entropy = rng.GenerateRandomU32(); - this->bits_available = BITSIZEOF(this->entropy); + m_entropy = m_rng.GenerateRandomU32(); + m_bits_available = BITSIZEOF(m_entropy); } bool GenerateRandomBit() { - if (this->bits_available == 0) { + if (m_bits_available == 0) { this->RefreshEntropy(); } - const bool rnd_bit = (this->entropy & 1) != 0; - this->entropy >>= 1; - --this->bits_available; + const bool rnd_bit = (m_entropy & 1) != 0; + m_entropy >>= 1; + --m_bits_available; return rnd_bit; } public: - RandomBitGenerator() : rng(), entropy(), bits_available() { - this->rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); + RandomBitGenerator() : m_rng(), m_entropy(), m_bits_available() { + m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); } size_t SelectRandomBit(u64 bitmap) { @@ -89,27 +89,27 @@ namespace ams::kern { public: static constexpr size_t MaxDepth = 4; private: - u64 *bit_storages[MaxDepth]; - RandomBitGenerator rng; - size_t num_bits; - size_t used_depths; + u64 *m_bit_storages[MaxDepth]; + RandomBitGenerator m_rng; + size_t m_num_bits; + size_t m_used_depths; public: - KPageBitmap() : bit_storages(), rng(), num_bits(), used_depths() { /* ... */ } + KPageBitmap() : m_bit_storages(), m_rng(), m_num_bits(), m_used_depths() { /* ... */ } - constexpr size_t GetNumBits() const { return this->num_bits; } - constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(this->used_depths) - 1; } + constexpr size_t GetNumBits() const { return m_num_bits; } + constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(m_used_depths) - 1; } u64 *Initialize(u64 *storage, size_t size) { /* Initially, everything is un-set. */ - this->num_bits = 0; + m_num_bits = 0; /* Calculate the needed bitmap depth. */ - this->used_depths = static_cast<size_t>(GetRequiredDepth(size)); - MESOSPHERE_ASSERT(this->used_depths <= MaxDepth); + m_used_depths = static_cast<size_t>(GetRequiredDepth(size)); + MESOSPHERE_ASSERT(m_used_depths <= MaxDepth); /* Set the bitmap pointers. */ for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { - this->bit_storages[depth] = storage; + m_bit_storages[depth] = storage; size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64); storage += size; } @@ -123,18 +123,18 @@ namespace ams::kern { if (random) { do { - const u64 v = this->bit_storages[depth][offset]; + const u64 v = m_bit_storages[depth][offset]; if (v == 0) { /* If depth is bigger than zero, then a previous level indicated a block was free.
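Each u64 word at depth d summarizes 64 words at depth d + 1, so a zero word can only legitimately be observed at the root, when no free block exists at all. As a worked example of the descent (assuming m_used_depths == 2): if leaf word 3 has value 0b1000, the root word has bit 3 set; the first iteration computes offset = 0 * 64 + 3 = 3, the second computes offset = 3 * 64 + 3 = 195, and 195 is returned as the index of the free block.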
*/ MESOSPHERE_ASSERT(depth == 0); return -1; } - offset = offset * BITSIZEOF(u64) + this->rng.SelectRandomBit(v); + offset = offset * BITSIZEOF(u64) + m_rng.SelectRandomBit(v); ++depth; - } while (depth < static_cast<s32>(this->used_depths)); + } while (depth < static_cast<s32>(m_used_depths)); } else { do { - const u64 v = this->bit_storages[depth][offset]; + const u64 v = m_bit_storages[depth][offset]; if (v == 0) { /* If depth is bigger than zero, then a previous level indicated a block was free. */ MESOSPHERE_ASSERT(depth == 0); return -1; } offset = offset * BITSIZEOF(u64) + __builtin_ctzll(v); ++depth; - } while (depth < static_cast<s32>(this->used_depths)); + } while (depth < static_cast<s32>(m_used_depths)); } return static_cast<ssize_t>(offset); @@ -150,17 +150,17 @@ namespace ams::kern { void SetBit(size_t offset) { this->SetBit(this->GetHighestDepthIndex(), offset); - this->num_bits++; + m_num_bits++; } void ClearBit(size_t offset) { this->ClearBit(this->GetHighestDepthIndex(), offset); - this->num_bits--; + m_num_bits--; } bool ClearRange(size_t offset, size_t count) { s32 depth = this->GetHighestDepthIndex(); - u64 *bits = this->bit_storages[depth]; + u64 *bits = m_bit_storages[depth]; size_t bit_ind = offset / BITSIZEOF(u64); if (AMS_LIKELY(count < BITSIZEOF(u64))) { const size_t shift = offset % BITSIZEOF(u64); @@ -202,7 +202,7 @@ namespace ams::kern { } while (remaining > 0); } - this->num_bits -= count; + m_num_bits -= count; return true; } private: @@ -212,7 +212,7 @@ namespace ams::kern { size_t which = offset % BITSIZEOF(u64); const u64 mask = u64(1) << which; - u64 *bit = std::addressof(this->bit_storages[depth][ind]); + u64 *bit = std::addressof(m_bit_storages[depth][ind]); u64 v = *bit; MESOSPHERE_ASSERT((v & mask) == 0); *bit = v | mask; @@ -230,7 +230,7 @@ namespace ams::kern { size_t which = offset % BITSIZEOF(u64); const u64 mask = u64(1) << which; - u64 *bit = std::addressof(this->bit_storages[depth][ind]); + u64 *bit = std::addressof(m_bit_storages[depth][ind]); u64 v = *bit; MESOSPHERE_ASSERT((v & mask) != 0); v &= ~mask; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp index 980fc6c36..a5bf95e44 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp @@ -21,10 +21,10 @@ namespace ams::kern { class KPageBuffer : public KSlabAllocated<KPageBuffer> { private: - alignas(PageSize) u8 buffer[PageSize]; + alignas(PageSize) u8 m_buffer[PageSize]; public: KPageBuffer() { - std::memset(buffer, 0, sizeof(buffer)); + std::memset(m_buffer, 0, sizeof(m_buffer)); } ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp index 15040b5d8..a7e126f45 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp @@ -24,24 +24,24 @@ namespace ams::kern { class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> { private: - KVirtualAddress address; - size_t num_pages; + KVirtualAddress m_address; + size_t m_num_pages; public: - constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), address(), num_pages() { /* ... */ } + constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), m_address(), m_num_pages() { /* ...
*/ } constexpr void Initialize(KVirtualAddress addr, size_t np) { - this->address = addr; - this->num_pages = np; + m_address = addr; + m_num_pages = np; } - constexpr KVirtualAddress GetAddress() const { return this->address; } - constexpr size_t GetNumPages() const { return this->num_pages; } + constexpr KVirtualAddress GetAddress() const { return m_address; } + constexpr size_t GetNumPages() const { return m_num_pages; } constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; } constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; } constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const { - return this->address == rhs.address && this->num_pages == rhs.num_pages; + return m_address == rhs.m_address && m_num_pages == rhs.m_num_pages; } constexpr bool operator==(const KBlockInfo &rhs) const { @@ -55,7 +55,7 @@ namespace ams::kern { constexpr bool IsStrictlyBefore(KVirtualAddress addr) const { const KVirtualAddress end = this->GetEndAddress(); - if (this->address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) { + if (m_address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) { return false; } @@ -68,7 +68,7 @@ namespace ams::kern { constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) { if (addr != Null<KVirtualAddress> && addr == this->GetEndAddress()) { - this->num_pages += np; + m_num_pages += np; return true; } return false; @@ -80,17 +80,17 @@ namespace ams::kern { using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType; using iterator = BlockInfoList::const_iterator; private: - BlockInfoList block_list; - KBlockInfoManager *manager; + BlockInfoList m_block_list; + KBlockInfoManager *m_manager; public: - explicit KPageGroup(KBlockInfoManager *m) : block_list(), manager(m) { /* ... */ } + explicit KPageGroup(KBlockInfoManager *m) : m_block_list(), m_manager(m) { /* ... */ } ~KPageGroup() { this->Finalize(); } void Finalize(); - iterator begin() const { return this->block_list.begin(); } - iterator end() const { return this->block_list.end(); } - bool empty() const { return this->block_list.empty(); } + iterator begin() const { return m_block_list.begin(); } + iterator end() const { return m_block_list.end(); } + bool empty() const { return m_block_list.empty(); } Result AddBlock(KVirtualAddress addr, size_t num_pages); void Open() const; @@ -111,14 +111,14 @@ namespace ams::kern { class KScopedPageGroup { private: - const KPageGroup *group; + const KPageGroup *m_pg; public: - explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : group(gp) { if (this->group) { this->group->Open(); } } + explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : m_pg(gp) { if (m_pg) { m_pg->Open(); } } explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ...
*/ } - ALWAYS_INLINE ~KScopedPageGroup() { if (this->group) { this->group->Close(); } } + ALWAYS_INLINE ~KScopedPageGroup() { if (m_pg) { m_pg->Close(); } } ALWAYS_INLINE void CancelClose() { - this->group = nullptr; + m_pg = nullptr; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp index 35ac503bd..e14e7e0df 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp @@ -53,48 +53,48 @@ namespace ams::kern { private: class Block { private: - KPageBitmap bitmap; - KVirtualAddress heap_address; - uintptr_t end_offset; - size_t block_shift; - size_t next_block_shift; + KPageBitmap m_bitmap; + KVirtualAddress m_heap_address; + uintptr_t m_end_offset; + size_t m_block_shift; + size_t m_next_block_shift; public: - Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ } + Block() : m_bitmap(), m_heap_address(), m_end_offset(), m_block_shift(), m_next_block_shift() { /* ... */ } - constexpr size_t GetShift() const { return this->block_shift; } - constexpr size_t GetNextShift() const { return this->next_block_shift; } + constexpr size_t GetShift() const { return m_block_shift; } + constexpr size_t GetNextShift() const { return m_next_block_shift; } constexpr size_t GetSize() const { return u64(1) << this->GetShift(); } constexpr size_t GetNumPages() const { return this->GetSize() / PageSize; } - constexpr size_t GetNumFreeBlocks() const { return this->bitmap.GetNumBits(); } + constexpr size_t GetNumFreeBlocks() const { return m_bitmap.GetNumBits(); } constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); } u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) { /* Set shifts. */ - this->block_shift = bs; - this->next_block_shift = nbs; + m_block_shift = bs; + m_next_block_shift = nbs; /* Align up the address. */ KVirtualAddress end = addr + size; - const size_t align = (this->next_block_shift != 0) ? (u64(1) << this->next_block_shift) : (u64(1) << this->block_shift); + const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) : (u64(1) << m_block_shift); addr = util::AlignDown(GetInteger(addr), align); end = util::AlignUp(GetInteger(end), align); - this->heap_address = addr; - this->end_offset = (end - addr) / (u64(1) << this->block_shift); - return this->bitmap.Initialize(bit_storage, this->end_offset); + m_heap_address = addr; + m_end_offset = (end - addr) / (u64(1) << m_block_shift); + return m_bitmap.Initialize(bit_storage, m_end_offset); } KVirtualAddress PushBlock(KVirtualAddress address) { /* Set the bit for the free block. */ - size_t offset = (address - this->heap_address) >> this->GetShift(); - this->bitmap.SetBit(offset); + size_t offset = (address - m_heap_address) >> this->GetShift(); + m_bitmap.SetBit(offset); /* If we have a next shift, try to clear the blocks below this one and return the new address. */ if (this->GetNextShift()) { const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift()); offset = util::AlignDown(offset, diff); - if (this->bitmap.ClearRange(offset, diff)) { - return this->heap_address + (offset << this->GetShift()); + if (m_bitmap.ClearRange(offset, diff)) { + return m_heap_address + (offset << this->GetShift()); } } @@ -104,15 +104,15 @@ namespace ams::kern { KVirtualAddress PopBlock(bool random) { /* Find a free block. 
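FindFreeBlock returns a signed offset so that -1 can signal exhaustion. PushBlock above is the inverse operation and doubles as the coalescing step: once the whole aligned group of 1 << (next_block_shift - block_shift) buddies is free, ClearRange strips them out and the returned address can be pushed one size class up, as in this hedged sketch of a free loop (illustrative only; 'blocks', 'num_blocks', and 'index' are hypothetical locals standing in for the KPageHeap members): for (s32 i = index; i < num_blocks; ++i) { addr = blocks[i].PushBlock(addr); if (addr == Null<KVirtualAddress>) { break; } }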
*/ - ssize_t soffset = this->bitmap.FindFreeBlock(random); + ssize_t soffset = m_bitmap.FindFreeBlock(random); if (soffset < 0) { return Null<KVirtualAddress>; } const size_t offset = static_cast<size_t>(soffset); /* Update our tracking and return it. */ - this->bitmap.ClearBit(offset); - return this->heap_address + (offset << this->GetShift()); + m_bitmap.ClearBit(offset); + return m_heap_address + (offset << this->GetShift()); } public: static constexpr size_t CalculateManagementOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) { @@ -123,21 +123,21 @@ namespace ams::kern { } }; private: - KVirtualAddress heap_address; - size_t heap_size; - size_t used_size; - size_t num_blocks; - Block blocks[NumMemoryBlockPageShifts]; + KVirtualAddress m_heap_address; + size_t m_heap_size; + size_t m_used_size; + size_t m_num_blocks; + Block m_blocks[NumMemoryBlockPageShifts]; private: void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts); size_t GetNumFreePages() const; void FreeBlock(KVirtualAddress block, s32 index); public: - KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ } + KPageHeap() : m_heap_address(), m_heap_size(), m_used_size(), m_num_blocks(), m_blocks() { /* ... */ } - constexpr KVirtualAddress GetAddress() const { return this->heap_address; } - constexpr size_t GetSize() const { return this->heap_size; } + constexpr KVirtualAddress GetAddress() const { return m_heap_address; } + constexpr size_t GetSize() const { return m_heap_size; } constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; } constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; } @@ -150,7 +150,7 @@ namespace ams::kern { void DumpFreeList() const; void UpdateUsedSize() { - this->used_size = this->heap_size - (this->GetNumFreePages() * PageSize); + m_used_size = m_heap_size - (this->GetNumFreePages() * PageSize); } KVirtualAddress AllocateBlock(s32 index, bool random); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp index 0f44b77b0..9192e5c77 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -76,30 +76,30 @@ namespace ams::kern { struct PageLinkedList { private: struct Node { - Node *next; - u8 buffer[PageSize - sizeof(Node *)]; + Node *m_next; + u8 m_buffer[PageSize - sizeof(Node *)]; }; static_assert(util::is_pod<Node>::value); private: - Node *root; + Node *m_root; public: - constexpr PageLinkedList() : root(nullptr) { /* ... */ } + constexpr PageLinkedList() : m_root(nullptr) { /* ...
*/ } void Push(Node *n) { MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); - n->next = this->root; - this->root = n; + n->m_next = m_root; + m_root = n; } void Push(KVirtualAddress addr) { this->Push(GetPointer<Node>(addr)); } - Node *Peek() const { return this->root; } + Node *Peek() const { return m_root; } Node *Pop() { - Node *r = this->root; - this->root = this->root->next; + Node *r = m_root; + m_root = m_root->m_next; return r; } }; @@ -122,82 +122,72 @@ namespace ams::kern { private: class KScopedPageTableUpdater { private: - KPageTableBase *page_table; - PageLinkedList ll; + KPageTableBase *m_pt; + PageLinkedList m_ll; public: - ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : page_table(pt), ll() { /* ... */ } + ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : m_pt(pt), m_ll() { /* ... */ } ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase &pt) : KScopedPageTableUpdater(std::addressof(pt)) { /* ... */ } - ALWAYS_INLINE ~KScopedPageTableUpdater() { this->page_table->FinalizeUpdate(this->GetPageList()); } + ALWAYS_INLINE ~KScopedPageTableUpdater() { m_pt->FinalizeUpdate(this->GetPageList()); } - PageLinkedList *GetPageList() { return std::addressof(this->ll); } + PageLinkedList *GetPageList() { return std::addressof(m_ll); } }; private: - KProcessAddress address_space_start; - KProcessAddress address_space_end; - KProcessAddress heap_region_start; - KProcessAddress heap_region_end; - KProcessAddress current_heap_end; - KProcessAddress alias_region_start; - KProcessAddress alias_region_end; - KProcessAddress stack_region_start; - KProcessAddress stack_region_end; - KProcessAddress kernel_map_region_start; - KProcessAddress kernel_map_region_end; - KProcessAddress alias_code_region_start; - KProcessAddress alias_code_region_end; - KProcessAddress code_region_start; - KProcessAddress code_region_end; - size_t max_heap_size; - size_t mapped_physical_memory_size; - size_t mapped_unsafe_physical_memory; - mutable KLightLock general_lock; - mutable KLightLock map_physical_memory_lock; - KPageTableImpl impl; - KMemoryBlockManager memory_block_manager; - u32 allocate_option; - u32 address_space_width; - bool is_kernel; - bool enable_aslr; - bool enable_device_address_space_merge; - KMemoryBlockSlabManager *memory_block_slab_manager; - KBlockInfoManager *block_info_manager; - const KMemoryRegion *cached_physical_linear_region; - const KMemoryRegion *cached_physical_heap_region; - const KMemoryRegion *cached_virtual_heap_region; - MemoryFillValue heap_fill_value; - MemoryFillValue ipc_fill_value; - MemoryFillValue stack_fill_value; + KProcessAddress m_address_space_start{}; + KProcessAddress m_address_space_end{}; + KProcessAddress m_heap_region_start{}; + KProcessAddress m_heap_region_end{}; + KProcessAddress m_current_heap_end{}; + KProcessAddress m_alias_region_start{}; + KProcessAddress m_alias_region_end{}; + KProcessAddress m_stack_region_start{}; + KProcessAddress m_stack_region_end{}; + KProcessAddress m_kernel_map_region_start{}; + KProcessAddress m_kernel_map_region_end{}; + KProcessAddress m_alias_code_region_start{}; + KProcessAddress m_alias_code_region_end{}; + KProcessAddress m_code_region_start{}; + KProcessAddress m_code_region_end{}; + size_t m_max_heap_size{}; + size_t m_mapped_physical_memory_size{}; + size_t m_mapped_unsafe_physical_memory{}; + mutable KLightLock m_general_lock{}; + mutable KLightLock m_map_physical_memory_lock{}; + KPageTableImpl m_impl{}; + KMemoryBlockManager m_memory_block_manager{}; +
u32 m_allocate_option{}; + u32 m_address_space_width{}; + bool m_is_kernel{}; + bool m_enable_aslr{}; + bool m_enable_device_address_space_merge{}; + KMemoryBlockSlabManager *m_memory_block_slab_manager{}; + KBlockInfoManager *m_block_info_manager{}; + const KMemoryRegion *m_cached_physical_linear_region{}; + const KMemoryRegion *m_cached_physical_heap_region{}; + const KMemoryRegion *m_cached_virtual_heap_region{}; + MemoryFillValue m_heap_fill_value{}; + MemoryFillValue m_ipc_fill_value{}; + MemoryFillValue m_stack_fill_value{}; public: - constexpr KPageTableBase() : - address_space_start(), address_space_end(), heap_region_start(), heap_region_end(), current_heap_end(), - alias_region_start(), alias_region_end(), stack_region_start(), stack_region_end(), kernel_map_region_start(), - kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(), - max_heap_size(), mapped_physical_memory_size(), mapped_unsafe_physical_memory(), general_lock(), map_physical_memory_lock(), - impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), enable_device_address_space_merge(), - memory_block_slab_manager(), block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(), - heap_fill_value(), ipc_fill_value(), stack_fill_value() - { - /* ... */ - } + constexpr KPageTableBase() { /* ... */ } NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end); NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager); void Finalize(); - constexpr bool IsKernel() const { return this->is_kernel; } - constexpr bool IsAslrEnabled() const { return this->enable_aslr; } + constexpr bool IsKernel() const { return m_is_kernel; } + constexpr bool IsAslrEnabled() const { return m_enable_aslr; } constexpr bool Contains(KProcessAddress addr) const { - return this->address_space_start <= addr && addr <= this->address_space_end - 1; + return m_address_space_start <= addr && addr <= m_address_space_end - 1; } constexpr bool Contains(KProcessAddress addr, size_t size) const { - return this->address_space_start <= addr && addr < addr + size && addr + size - 1 <= this->address_space_end - 1; + return m_address_space_start <= addr && addr < addr + size && addr + size - 1 <= m_address_space_end - 1; } constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const { - return this->Contains(addr, size) && this->alias_region_start <= addr && addr + size - 1 <= this->alias_region_end - 1; + return this->Contains(addr, size) && m_alias_region_start <= addr && addr + size - 1 <= m_alias_region_end - 1; } bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { @@ -213,55 +203,55 @@ namespace ams::kern { virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0; virtual void FinalizeUpdate(PageLinkedList *page_list) = 0; - KPageTableImpl &GetImpl() { return this->impl; } - const KPageTableImpl &GetImpl() const { return this->impl; } + KPageTableImpl &GetImpl() { return 
m_impl; } + const KPageTableImpl &GetImpl() const { return m_impl; } - bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); } + bool IsLockedByCurrentThread() const { return m_general_lock.IsLockedByCurrentThread(); } bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - return KMemoryLayout::IsLinearMappedPhysicalAddress(this->cached_physical_linear_region, phys_addr); + return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr); } bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - return KMemoryLayout::IsLinearMappedPhysicalAddress(this->cached_physical_linear_region, phys_addr, size); + return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr, size); } bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - return KMemoryLayout::IsHeapPhysicalAddress(this->cached_physical_heap_region, phys_addr); + return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); } bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - return KMemoryLayout::IsHeapPhysicalAddress(this->cached_physical_heap_region, phys_addr, size); + return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr, size); } bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) { MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread()); - return KMemoryLayout::IsHeapPhysicalAddress(this->cached_physical_heap_region, phys_addr); + return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); } bool IsHeapVirtualAddress(KVirtualAddress virt_addr) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - return KMemoryLayout::IsHeapVirtualAddress(this->cached_virtual_heap_region, virt_addr); + return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr); } bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - return KMemoryLayout::IsHeapVirtualAddress(this->cached_virtual_heap_region, virt_addr, size); + return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr, size); } bool ContainsPages(KProcessAddress addr, size_t num_pages) const { - return (this->address_space_start <= addr) && (num_pages <= (this->address_space_end - this->address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= this->address_space_end - 1); + return (m_address_space_start <= addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); } private: constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 
1 : 4; } @@ -308,7 +298,7 @@ namespace ams::kern { return this->GetImpl().GetPhysicalAddress(out, virt_addr); } - KBlockInfoManager *GetBlockInfoManager() const { return this->block_info_manager; } + KBlockInfoManager *GetBlockInfoManager() const { return m_block_info_manager; } Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm); Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm); @@ -386,43 +376,43 @@ namespace ams::kern { void DumpMemoryBlocksLocked() const { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - this->memory_block_manager.DumpBlocks(); + m_memory_block_manager.DumpBlocks(); } void DumpMemoryBlocks() const { - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); this->DumpMemoryBlocksLocked(); } void DumpPageTable() const { - KScopedLightLock lk(this->general_lock); - this->GetImpl().Dump(GetInteger(this->address_space_start), this->address_space_end - this->address_space_start); + KScopedLightLock lk(m_general_lock); + this->GetImpl().Dump(GetInteger(m_address_space_start), m_address_space_end - m_address_space_start); } size_t CountPageTables() const { - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); return this->GetImpl().CountPageTables(); } public: - KProcessAddress GetAddressSpaceStart() const { return this->address_space_start; } - KProcessAddress GetHeapRegionStart() const { return this->heap_region_start; } - KProcessAddress GetAliasRegionStart() const { return this->alias_region_start; } - KProcessAddress GetStackRegionStart() const { return this->stack_region_start; } - KProcessAddress GetKernelMapRegionStart() const { return this->kernel_map_region_start; } - KProcessAddress GetAliasCodeRegionStart() const { return this->alias_code_region_start; } + KProcessAddress GetAddressSpaceStart() const { return m_address_space_start; } + KProcessAddress GetHeapRegionStart() const { return m_heap_region_start; } + KProcessAddress GetAliasRegionStart() const { return m_alias_region_start; } + KProcessAddress GetStackRegionStart() const { return m_stack_region_start; } + KProcessAddress GetKernelMapRegionStart() const { return m_kernel_map_region_start; } + KProcessAddress GetAliasCodeRegionStart() const { return m_alias_code_region_start; } - size_t GetAddressSpaceSize() const { return this->address_space_end - this->address_space_start; } - size_t GetHeapRegionSize() const { return this->heap_region_end - this->heap_region_start; } - size_t GetAliasRegionSize() const { return this->alias_region_end - this->alias_region_start; } - size_t GetStackRegionSize() const { return this->stack_region_end - this->stack_region_start; } - size_t GetKernelMapRegionSize() const { return this->kernel_map_region_end - this->kernel_map_region_start; } - size_t GetAliasCodeRegionSize() const { return this->alias_code_region_end - this->alias_code_region_start; } + size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; } + size_t GetHeapRegionSize() const { return m_heap_region_end - m_heap_region_start; } + size_t GetAliasRegionSize() const { return m_alias_region_end - m_alias_region_start; } + size_t GetStackRegionSize() const { return m_stack_region_end - m_stack_region_start; } + size_t GetKernelMapRegionSize() const { return m_kernel_map_region_end - m_kernel_map_region_start; } + size_t GetAliasCodeRegionSize() const { return m_alias_code_region_end - m_alias_code_region_start; } size_t 
GetNormalMemorySize() const { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); - return (this->current_heap_end - this->heap_region_start) + this->mapped_physical_memory_size; + return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size; } size_t GetCodeSize() const; @@ -430,7 +420,7 @@ namespace ams::kern { size_t GetAliasCodeSize() const; size_t GetAliasCodeDataSize() const; - u32 GetAllocateOption() const { return this->allocate_option; } + u32 GetAllocateOption() const { return m_allocate_option; } public: static ALWAYS_INLINE KVirtualAddress GetLinearMappedVirtualAddress(KPhysicalAddress addr) { return KMemoryLayout::GetLinearVirtualAddress(addr); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp index dc2fe96da..ca8edf5b5 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp @@ -24,7 +24,7 @@ namespace ams::kern { class PageTablePage { private: - u8 buffer[PageSize]; + u8 m_buffer[PageSize]; }; static_assert(sizeof(PageTablePage) == PageSize); @@ -38,23 +38,23 @@ namespace ams::kern { private: using BaseHeap = KDynamicSlabHeap; private: - RefCount *ref_counts; + RefCount *m_ref_counts; public: static constexpr size_t CalculateReferenceCountSize(size_t size) { return (size / PageSize) * sizeof(RefCount); } public: - constexpr KPageTableManager() : BaseHeap(), ref_counts() { /* ... */ } + constexpr KPageTableManager() : BaseHeap(), m_ref_counts() { /* ... */ } private: void Initialize(RefCount *rc) { - this->ref_counts = rc; + m_ref_counts = rc; for (size_t i = 0; i < this->GetSize() / PageSize; i++) { - this->ref_counts[i] = 0; + m_ref_counts[i] = 0; } } constexpr RefCount *GetRefCountPointer(KVirtualAddress addr) const { - return std::addressof(this->ref_counts[(addr - this->GetAddress()) / PageSize]); + return std::addressof(m_ref_counts[(addr - this->GetAddress()) / PageSize]); } public: void Initialize(KDynamicPageManager *page_allocator, RefCount *rc) { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp index 1c7d5da02..6182ab537 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp @@ -35,13 +35,13 @@ namespace ams::kern { ServerClosed = 3, }; private: - KServerPort server; - KClientPort client; - uintptr_t name; - State state; - bool is_light; + KServerPort m_server; + KClientPort m_client; + uintptr_t m_name; + State m_state; + bool m_is_light; public: - constexpr KPort() : server(), client(), name(), state(State::Invalid), is_light() { /* ... */ } + constexpr KPort() : m_server(), m_client(), m_name(), m_state(State::Invalid), m_is_light() { /* ... */ } virtual ~KPort() { /* ... */ } static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... 
*/ } @@ -50,16 +50,16 @@ namespace ams::kern { void OnClientClosed(); void OnServerClosed(); - uintptr_t GetName() const { return this->name; } - bool IsLight() const { return this->is_light; } + uintptr_t GetName() const { return m_name; } + bool IsLight() const { return m_is_light; } Result EnqueueSession(KServerSession *session); Result EnqueueSession(KLightServerSession *session); - KClientPort &GetClientPort() { return this->client; } - KServerPort &GetServerPort() { return this->server; } - const KClientPort &GetClientPort() const { return this->client; } - const KServerPort &GetServerPort() const { return this->server; } + KClientPort &GetClientPort() { return m_client; } + KServerPort &GetServerPort() { return m_server; } + const KClientPort &GetClientPort() const { return m_client; } + const KServerPort &GetServerPort() const { return m_server; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp index a9ad07006..79064d70f 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp @@ -68,11 +68,11 @@ namespace ams::kern { public: class KPerCoreQueue { private: - Entry root[NumCores]; + Entry m_root[NumCores]; public: - constexpr ALWAYS_INLINE KPerCoreQueue() : root() { + constexpr ALWAYS_INLINE KPerCoreQueue() : m_root() { for (size_t i = 0; i < NumCores; i++) { - this->root[i].Initialize(); + m_root[i].Initialize(); } } @@ -81,14 +81,14 @@ namespace ams::kern { Entry &member_entry = member->GetPriorityQueueEntry(core); /* Get the entry associated with the end of the queue. */ - Member *tail = this->root[core].GetPrev(); - Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; + Member *tail = m_root[core].GetPrev(); + Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core]; /* Link the entries. */ member_entry.SetPrev(tail); member_entry.SetNext(nullptr); tail_entry.SetNext(member); - this->root[core].SetPrev(member); + m_root[core].SetPrev(member); return (tail == nullptr); } @@ -98,14 +98,14 @@ namespace ams::kern { Entry &member_entry = member->GetPriorityQueueEntry(core); /* Get the entry associated with the front of the queue. */ - Member *head = this->root[core].GetNext(); - Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; + Member *head = m_root[core].GetNext(); + Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core]; /* Link the entries. */ member_entry.SetPrev(nullptr); member_entry.SetNext(head); head_entry.SetPrev(member); - this->root[core].SetNext(member); + m_root[core].SetNext(member); return (head == nullptr); } @@ -117,8 +117,8 @@ namespace ams::kern { /* Get the entries associated with next and prev. */ Member *prev = member_entry.GetPrev(); Member *next = member_entry.GetNext(); - Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core]; - Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; + Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core]; + Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core]; /* Unlink. 
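The two pointer updates below splice the member out in O(1); whichever neighbor is absent is stood in for by the per-core root entry, so head and tail removal need no special casing. The caller relies on the boolean result, which reports whether the queue became empty so that the corresponding priority bit can be cleared in KPriorityQueueImpl.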
*/ prev_entry.SetNext(next); @@ -128,24 +128,24 @@ namespace ams::kern { } constexpr ALWAYS_INLINE Member *GetFront(s32 core) const { - return this->root[core].GetNext(); + return m_root[core].GetNext(); } }; class KPriorityQueueImpl { private: - KPerCoreQueue queues[NumPriority]; - util::BitSet64<NumPriority> available_priorities[NumCores]; + KPerCoreQueue m_queues[NumPriority]; + util::BitSet64<NumPriority> m_available_priorities[NumCores]; public: - constexpr ALWAYS_INLINE KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ } + constexpr ALWAYS_INLINE KPriorityQueueImpl() : m_queues(), m_available_priorities() { /* ... */ } constexpr ALWAYS_INLINE void PushBack(s32 priority, s32 core, Member *member) { MESOSPHERE_ASSERT(IsValidCore(core)); MESOSPHERE_ASSERT(IsValidPriority(priority)); if (AMS_LIKELY(priority <= LowestPriority)) { - if (this->queues[priority].PushBack(core, member)) { - this->available_priorities[core].SetBit(priority); + if (m_queues[priority].PushBack(core, member)) { + m_available_priorities[core].SetBit(priority); } } } @@ -155,8 +155,8 @@ namespace ams::kern { MESOSPHERE_ASSERT(IsValidPriority(priority)); if (AMS_LIKELY(priority <= LowestPriority)) { - if (this->queues[priority].PushFront(core, member)) { - this->available_priorities[core].SetBit(priority); + if (m_queues[priority].PushFront(core, member)) { + m_available_priorities[core].SetBit(priority); } } } @@ -166,8 +166,8 @@ namespace ams::kern { MESOSPHERE_ASSERT(IsValidPriority(priority)); if (AMS_LIKELY(priority <= LowestPriority)) { - if (this->queues[priority].Remove(core, member)) { - this->available_priorities[core].ClearBit(priority); + if (m_queues[priority].Remove(core, member)) { + m_available_priorities[core].ClearBit(priority); } } } @@ -175,9 +175,9 @@ namespace ams::kern { constexpr ALWAYS_INLINE Member *GetFront(s32 core) const { MESOSPHERE_ASSERT(IsValidCore(core)); - const s32 priority = this->available_priorities[core].CountLeadingZero(); + const s32 priority = m_available_priorities[core].CountLeadingZero(); if (AMS_LIKELY(priority <= LowestPriority)) { - return this->queues[priority].GetFront(core); + return m_queues[priority].GetFront(core); } else { return nullptr; } @@ -188,7 +188,7 @@ namespace ams::kern { MESOSPHERE_ASSERT(IsValidPriority(priority)); if (AMS_LIKELY(priority <= LowestPriority)) { - return this->queues[priority].GetFront(core); + return m_queues[priority].GetFront(core); } else { return nullptr; } @@ -199,9 +199,9 @@ namespace ams::kern { Member *next = member->GetPriorityQueueEntry(core).GetNext(); if (next == nullptr) { - const s32 priority = this->available_priorities[core].GetNextSet(member->GetPriority()); + const s32 priority = m_available_priorities[core].GetNextSet(member->GetPriority()); if (AMS_LIKELY(priority <= LowestPriority)) { - next = this->queues[priority].GetFront(core); + next = m_queues[priority].GetFront(core); } } return next; @@ -212,8 +212,8 @@ namespace ams::kern { MESOSPHERE_ASSERT(IsValidPriority(priority)); if (AMS_LIKELY(priority <= LowestPriority)) { - this->queues[priority].Remove(core, member); - this->queues[priority].PushFront(core, member); + m_queues[priority].Remove(core, member); + m_queues[priority].PushFront(core, member); } } @@ -222,17 +222,17 @@ namespace ams::kern { MESOSPHERE_ASSERT(IsValidPriority(priority)); if (AMS_LIKELY(priority <= LowestPriority)) { - this->queues[priority].Remove(core, member); - this->queues[priority].PushBack(core, member); - return this->queues[priority].GetFront(core); + m_queues[priority].Remove(core,
member); + m_queues[priority].PushBack(core, member); + return m_queues[priority].GetFront(core); } else { return nullptr; } } }; private: - KPriorityQueueImpl scheduled_queue; - KPriorityQueueImpl suggested_queue; + KPriorityQueueImpl m_scheduled_queue; + KPriorityQueueImpl m_suggested_queue; private: constexpr ALWAYS_INLINE void ClearAffinityBit(u64 &affinity, s32 core) { affinity &= ~(u64(1ul) << core); @@ -250,13 +250,13 @@ namespace ams::kern { /* Push onto the scheduled queue for its core, if we can. */ u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { - this->scheduled_queue.PushBack(priority, core, member); + m_scheduled_queue.PushBack(priority, core, member); ClearAffinityBit(affinity, core); } /* And suggest the thread for all other cores. */ while (affinity) { - this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + m_suggested_queue.PushBack(priority, GetNextCore(affinity), member); } } @@ -266,14 +266,14 @@ namespace ams::kern { /* Push onto the scheduled queue for its core, if we can. */ u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { - this->scheduled_queue.PushFront(priority, core, member); + m_scheduled_queue.PushFront(priority, core, member); ClearAffinityBit(affinity, core); } /* And suggest the thread for all other cores. */ /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ while (affinity) { - this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + m_suggested_queue.PushBack(priority, GetNextCore(affinity), member); } } @@ -283,41 +283,41 @@ namespace ams::kern { /* Remove from the scheduled queue for its core. */ u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { - this->scheduled_queue.Remove(priority, core, member); + m_scheduled_queue.Remove(priority, core, member); ClearAffinityBit(affinity, core); } /* Remove from the suggested queue for all other cores. */ while (affinity) { - this->suggested_queue.Remove(priority, GetNextCore(affinity), member); + m_suggested_queue.Remove(priority, GetNextCore(affinity), member); } } public: - constexpr ALWAYS_INLINE KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ } + constexpr ALWAYS_INLINE KPriorityQueue() : m_scheduled_queue(), m_suggested_queue() { /* ... */ } /* Getters. 
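A thread is 'scheduled' only on its active core and merely 'suggested' on every other core its affinity mask permits, so core-local selection typically consults both queues, as in this hedged sketch (illustrative only; the real selection logic lives in KScheduler): Member *next = queue.GetScheduledFront(core); if (next == nullptr) { next = queue.GetSuggestedFront(core); } A suggested thread still has to be migrated to the core before it may run there.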
*/ constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core) const { - return this->scheduled_queue.GetFront(core); + return m_scheduled_queue.GetFront(core); } constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core, s32 priority) const { - return this->scheduled_queue.GetFront(priority, core); + return m_scheduled_queue.GetFront(priority, core); } constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core) const { - return this->suggested_queue.GetFront(core); + return m_suggested_queue.GetFront(core); } constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core, s32 priority) const { - return this->suggested_queue.GetFront(priority, core); + return m_suggested_queue.GetFront(priority, core); } constexpr ALWAYS_INLINE Member *GetScheduledNext(s32 core, const Member *member) const { - return this->scheduled_queue.GetNext(core, member); + return m_scheduled_queue.GetNext(core, member); } constexpr ALWAYS_INLINE Member *GetSuggestedNext(s32 core, const Member *member) const { - return this->suggested_queue.GetNext(core, member); + return m_suggested_queue.GetNext(core, member); } constexpr ALWAYS_INLINE Member *GetSamePriorityNext(s32 core, const Member *member) const { @@ -334,11 +334,11 @@ namespace ams::kern { } constexpr ALWAYS_INLINE void MoveToScheduledFront(Member *member) { - this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); + m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); } constexpr ALWAYS_INLINE KThread *MoveToScheduledBack(Member *member) { - return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member); + return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member); } /* First class fancy operations. */ @@ -367,9 +367,9 @@ namespace ams::kern { for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { if (prev_affinity.GetAffinity(core)) { if (core == prev_core) { - this->scheduled_queue.Remove(priority, core, member); + m_scheduled_queue.Remove(priority, core, member); } else { - this->suggested_queue.Remove(priority, core, member); + m_suggested_queue.Remove(priority, core, member); } } } @@ -378,9 +378,9 @@ namespace ams::kern { for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { if (new_affinity.GetAffinity(core)) { if (core == new_core) { - this->scheduled_queue.PushBack(priority, core, member); + m_scheduled_queue.PushBack(priority, core, member); } else { - this->suggested_queue.PushBack(priority, core, member); + m_suggested_queue.PushBack(priority, core, member); } } } @@ -395,22 +395,22 @@ namespace ams::kern { if (prev_core != new_core) { /* Remove from the scheduled queue for the previous core. */ if (prev_core >= 0) { - this->scheduled_queue.Remove(priority, prev_core, member); + m_scheduled_queue.Remove(priority, prev_core, member); } /* Remove from the suggested queue and add to the scheduled queue for the new core. */ if (new_core >= 0) { - this->suggested_queue.Remove(priority, new_core, member); + m_suggested_queue.Remove(priority, new_core, member); if (to_front) { - this->scheduled_queue.PushFront(priority, new_core, member); + m_scheduled_queue.PushFront(priority, new_core, member); } else { - this->scheduled_queue.PushBack(priority, new_core, member); + m_scheduled_queue.PushBack(priority, new_core, member); } } /* Add to the suggested queue for the previous core.
*/ if (prev_core >= 0) { - this->suggested_queue.PushBack(priority, prev_core, member); + m_suggested_queue.PushBack(priority, prev_core, member); } } } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp index b8dc58b0b..fce982912 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -48,79 +48,79 @@ namespace ams::kern { State_DebugBreak = ams::svc::ProcessState_DebugBreak, }; - using ThreadList = util::IntrusiveListMemberTraits<&KThread::process_list_node>::ListType; + using ThreadList = util::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType; static constexpr size_t AslrAlignment = KernelAslrAlignment; private: using SharedMemoryInfoList = util::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType; - using BetaList = util::IntrusiveListMemberTraits<&KBeta::process_list_node>::ListType; + using BetaList = util::IntrusiveListMemberTraits<&KBeta::m_process_list_node>::ListType; using TLPTree = util::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType; using TLPIterator = TLPTree::iterator; private: - KProcessPageTable page_table{}; - std::atomic<size_t> used_kernel_memory_size{}; - TLPTree fully_used_tlp_tree{}; - TLPTree partially_used_tlp_tree{}; - s32 ideal_core_id{}; - void *attached_object{}; - KResourceLimit *resource_limit{}; - KVirtualAddress system_resource_address{}; - size_t system_resource_num_pages{}; - size_t memory_release_hint{}; - State state{}; - KLightLock state_lock{}; - KLightLock list_lock{}; - KConditionVariable cond_var{}; - KAddressArbiter address_arbiter{}; - u64 entropy[4]{}; - bool is_signaled{}; - bool is_initialized{}; - bool is_application{}; - char name[13]{}; - std::atomic<u16> num_threads{}; - u16 peak_num_threads{}; - u32 flags{}; - KMemoryManager::Pool memory_pool{}; - s64 schedule_count{}; - KCapabilities capabilities{}; - ams::svc::ProgramId program_id{}; - u64 process_id{}; - s64 creation_time{}; - KProcessAddress code_address{}; - size_t code_size{}; - size_t main_thread_stack_size{}; - size_t max_process_memory{}; - u32 version{}; - KHandleTable handle_table{}; - KProcessAddress plr_address{}; - void *plr_heap_address{}; - KThread *exception_thread{}; - ThreadList thread_list{}; - SharedMemoryInfoList shared_memory_list{}; - BetaList beta_list{}; - bool is_suspended{}; - bool is_jit_debug{}; - ams::svc::DebugEvent jit_debug_event_type{}; - ams::svc::DebugException jit_debug_exception_type{}; - uintptr_t jit_debug_params[4]{}; - u64 jit_debug_thread_id{}; - KWaitObject wait_object{}; - KThread *running_threads[cpu::NumCores]{}; - u64 running_thread_idle_counts[cpu::NumCores]{}; - KThread *pinned_threads[cpu::NumCores]{}; - std::atomic<u64> num_created_threads{}; - std::atomic<s64> cpu_time{}; - std::atomic<u64> num_process_switches{}; - std::atomic<u64> num_thread_switches{}; - std::atomic<u64> num_fpu_switches{}; - std::atomic<u64> num_supervisor_calls{}; - std::atomic<u64> num_ipc_messages{}; - std::atomic<u64> num_ipc_replies{}; - std::atomic<u64> num_ipc_receives{}; - KDynamicPageManager dynamic_page_manager{}; - KMemoryBlockSlabManager memory_block_slab_manager{}; - KBlockInfoManager block_info_manager{}; - KPageTableManager page_table_manager{}; + KProcessPageTable m_page_table{}; + std::atomic<size_t> m_used_kernel_memory_size{}; + TLPTree m_fully_used_tlp_tree{}; + TLPTree m_partially_used_tlp_tree{}; + s32 m_ideal_core_id{}; + void *m_attached_object{}; + KResourceLimit *m_resource_limit{}; + KVirtualAddress m_system_resource_address{}; + size_t m_system_resource_num_pages{}; + size_t m_memory_release_hint{}; + State m_state{}; + KLightLock m_state_lock{}; + KLightLock m_list_lock{}; + KConditionVariable m_cond_var{}; + KAddressArbiter m_address_arbiter{}; + u64 m_entropy[4]{}; + bool m_is_signaled{}; + bool m_is_initialized{}; + bool m_is_application{}; + char m_name[13]{}; + std::atomic<u16> m_num_threads{}; + u16 m_peak_num_threads{}; + u32 m_flags{}; + KMemoryManager::Pool m_memory_pool{}; + s64 m_schedule_count{}; + KCapabilities m_capabilities{}; + ams::svc::ProgramId m_program_id{}; + u64 m_process_id{}; + s64 m_creation_time{}; + KProcessAddress m_code_address{}; + size_t m_code_size{}; + size_t m_main_thread_stack_size{}; + size_t m_max_process_memory{}; + u32 m_version{}; + KHandleTable m_handle_table{}; + KProcessAddress m_plr_address{}; + void *m_plr_heap_address{}; + KThread *m_exception_thread{}; + ThreadList m_thread_list{}; + SharedMemoryInfoList m_shared_memory_list{}; + BetaList m_beta_list{}; + bool m_is_suspended{}; + bool m_is_jit_debug{}; + ams::svc::DebugEvent m_jit_debug_event_type{}; + ams::svc::DebugException m_jit_debug_exception_type{}; + uintptr_t m_jit_debug_params[4]{}; + u64 m_jit_debug_thread_id{}; + KWaitObject m_wait_object{}; + KThread *m_running_threads[cpu::NumCores]{}; + u64 m_running_thread_idle_counts[cpu::NumCores]{}; + KThread *m_pinned_threads[cpu::NumCores]{}; + std::atomic<u64> m_num_created_threads{}; + std::atomic<s64> m_cpu_time{}; + std::atomic<u64> m_num_process_switches{}; + std::atomic<u64> m_num_thread_switches{}; + std::atomic<u64> m_num_fpu_switches{}; + std::atomic<u64> m_num_supervisor_calls{}; + std::atomic<u64> m_num_ipc_messages{}; + std::atomic<u64> m_num_ipc_replies{}; + std::atomic<u64> m_num_ipc_receives{}; + KDynamicPageManager m_dynamic_page_manager{}; + KMemoryBlockSlabManager m_memory_block_slab_manager{}; + KBlockInfoManager m_block_info_manager{}; + KPageTableManager m_page_table_manager{}; private: Result Initialize(const ams::svc::CreateProcessParameter &params); @@ -130,15 +130,15 @@ namespace ams::kern { void PinThread(s32 core_id, KThread *thread) { MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores)); MESOSPHERE_ASSERT(thread != nullptr); - MESOSPHERE_ASSERT(this->pinned_threads[core_id] == nullptr); - this->pinned_threads[core_id] = thread; + MESOSPHERE_ASSERT(m_pinned_threads[core_id] == nullptr); + m_pinned_threads[core_id] = thread; } void UnpinThread(s32 core_id, KThread *thread) { MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores)); MESOSPHERE_ASSERT(thread != nullptr); - MESOSPHERE_ASSERT(this->pinned_threads[core_id] == thread); - this->pinned_threads[core_id] = nullptr; + MESOSPHERE_ASSERT(m_pinned_threads[core_id] == thread); + m_pinned_threads[core_id] = nullptr; } public: KProcess() { /* ...
*/ } @@ -148,67 +148,67 @@ namespace ams::kern { Result Initialize(const ams::svc::CreateProcessParameter &params, svc::KUserPointer<const u32 *> caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool); void Exit(); - constexpr const char *GetName() const { return this->name; } + constexpr const char *GetName() const { return m_name; } - constexpr ams::svc::ProgramId GetProgramId() const { return this->program_id; } + constexpr ams::svc::ProgramId GetProgramId() const { return m_program_id; } - constexpr u64 GetProcessId() const { return this->process_id; } + constexpr u64 GetProcessId() const { return m_process_id; } - constexpr State GetState() const { return this->state; } + constexpr State GetState() const { return m_state; } - constexpr u64 GetCoreMask() const { return this->capabilities.GetCoreMask(); } - constexpr u64 GetPriorityMask() const { return this->capabilities.GetPriorityMask(); } + constexpr u64 GetCoreMask() const { return m_capabilities.GetCoreMask(); } + constexpr u64 GetPriorityMask() const { return m_capabilities.GetPriorityMask(); } - constexpr s32 GetIdealCoreId() const { return this->ideal_core_id; } - constexpr void SetIdealCoreId(s32 core_id) { this->ideal_core_id = core_id; } + constexpr s32 GetIdealCoreId() const { return m_ideal_core_id; } + constexpr void SetIdealCoreId(s32 core_id) { m_ideal_core_id = core_id; } constexpr bool CheckThreadPriority(s32 prio) const { return ((1ul << prio) & this->GetPriorityMask()) != 0; } - constexpr u32 GetCreateProcessFlags() const { return this->flags; } + constexpr u32 GetCreateProcessFlags() const { return m_flags; } - constexpr bool Is64Bit() const { return this->flags & ams::svc::CreateProcessFlag_Is64Bit; } + constexpr bool Is64Bit() const { return m_flags & ams::svc::CreateProcessFlag_Is64Bit; } - constexpr KProcessAddress GetEntryPoint() const { return this->code_address; } + constexpr KProcessAddress GetEntryPoint() const { return m_code_address; } - constexpr size_t GetMainStackSize() const { return this->main_thread_stack_size; } + constexpr size_t GetMainStackSize() const { return m_main_thread_stack_size; } - constexpr KMemoryManager::Pool GetMemoryPool() const { return this->memory_pool; } + constexpr KMemoryManager::Pool GetMemoryPool() const { return m_memory_pool; } - constexpr u64 GetRandomEntropy(size_t i) const { return this->entropy[i]; } + constexpr u64 GetRandomEntropy(size_t i) const { return m_entropy[i]; } - constexpr bool IsApplication() const { return this->is_application; } + constexpr bool IsApplication() const { return m_is_application; } - constexpr bool IsSuspended() const { return this->is_suspended; } - constexpr void SetSuspended(bool suspended) { this->is_suspended = suspended; } + constexpr bool IsSuspended() const { return m_is_suspended; } + constexpr void SetSuspended(bool suspended) { m_is_suspended = suspended; } Result Terminate(); constexpr bool IsTerminated() const { - return this->state == State_Terminated; + return m_state == State_Terminated; } constexpr bool IsAttachedToDebugger() const { - return this->attached_object != nullptr; + return m_attached_object != nullptr; } constexpr bool IsPermittedInterrupt(int32_t interrupt_id) const { - return this->capabilities.IsPermittedInterrupt(interrupt_id); + return m_capabilities.IsPermittedInterrupt(interrupt_id); } constexpr bool IsPermittedDebug() const { - return this->capabilities.IsPermittedDebug(); + return m_capabilities.IsPermittedDebug(); } constexpr bool CanForceDebug() const { - return this->capabilities.CanForceDebug();
+ return m_capabilities.CanForceDebug(); } - u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); } + u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); } - ThreadList &GetThreadList() { return this->thread_list; } - const ThreadList &GetThreadList() const { return this->thread_list; } + ThreadList &GetThreadList() { return m_thread_list; } + const ThreadList &GetThreadList() const { return m_thread_list; } - constexpr void *GetDebugObject() const { return this->attached_object; } + constexpr void *GetDebugObject() const { return m_attached_object; } KProcess::State SetDebugObject(void *debug_object); void ClearDebugObject(KProcess::State state); @@ -223,46 +223,46 @@ namespace ams::kern { KThread *GetPinnedThread(s32 core_id) const { MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast(cpu::NumCores)); - return this->pinned_threads[core_id]; + return m_pinned_threads[core_id]; } void CopySvcPermissionsTo(KThread::StackParameters &sp) { - this->capabilities.CopySvcPermissionsTo(sp); + m_capabilities.CopySvcPermissionsTo(sp); } void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) { - this->capabilities.CopyPinnedSvcPermissionsTo(sp); + m_capabilities.CopyPinnedSvcPermissionsTo(sp); } void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) { - this->capabilities.CopyUnpinnedSvcPermissionsTo(sp); + m_capabilities.CopyUnpinnedSvcPermissionsTo(sp); } void CopyEnterExceptionSvcPermissionsTo(KThread::StackParameters &sp) { - this->capabilities.CopyEnterExceptionSvcPermissionsTo(sp); + m_capabilities.CopyEnterExceptionSvcPermissionsTo(sp); } void CopyLeaveExceptionSvcPermissionsTo(KThread::StackParameters &sp) { - this->capabilities.CopyLeaveExceptionSvcPermissionsTo(sp); + m_capabilities.CopyLeaveExceptionSvcPermissionsTo(sp); } - constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; } + constexpr KResourceLimit *GetResourceLimit() const { return m_resource_limit; } bool ReserveResource(ams::svc::LimitableResource which, s64 value); bool ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout); void ReleaseResource(ams::svc::LimitableResource which, s64 value); void ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint); - constexpr KLightLock &GetStateLock() { return this->state_lock; } - constexpr KLightLock &GetListLock() { return this->list_lock; } + constexpr KLightLock &GetStateLock() { return m_state_lock; } + constexpr KLightLock &GetListLock() { return m_list_lock; } - constexpr KProcessPageTable &GetPageTable() { return this->page_table; } - constexpr const KProcessPageTable &GetPageTable() const { return this->page_table; } + constexpr KProcessPageTable &GetPageTable() { return m_page_table; } + constexpr const KProcessPageTable &GetPageTable() const { return m_page_table; } - constexpr KHandleTable &GetHandleTable() { return this->handle_table; } - constexpr const KHandleTable &GetHandleTable() const { return this->handle_table; } + constexpr KHandleTable &GetHandleTable() { return m_handle_table; } + constexpr const KHandleTable &GetHandleTable() const { return m_handle_table; } - KWaitObject *GetWaitObjectPointer() { return std::addressof(this->wait_object); } + KWaitObject *GetWaitObjectPointer() { return std::addressof(m_wait_object); } size_t GetUsedUserPhysicalMemorySize() const; size_t GetTotalUserPhysicalMemorySize() const; @@ -276,45 +276,45 @@ namespace ams::kern { Result DeleteThreadLocalRegion(KProcessAddress addr); void 
*GetThreadLocalRegionPointer(KProcessAddress addr); - constexpr KProcessAddress GetProcessLocalRegionAddress() const { return this->plr_address; } + constexpr KProcessAddress GetProcessLocalRegionAddress() const { return m_plr_address; } - void AddCpuTime(s64 diff) { this->cpu_time += diff; } - s64 GetCpuTime() { return this->cpu_time; } + void AddCpuTime(s64 diff) { m_cpu_time += diff; } + s64 GetCpuTime() { return m_cpu_time; } - constexpr s64 GetScheduledCount() const { return this->schedule_count; } - void IncrementScheduledCount() { ++this->schedule_count; } + constexpr s64 GetScheduledCount() const { return m_schedule_count; } + void IncrementScheduledCount() { ++m_schedule_count; } void IncrementThreadCount(); void DecrementThreadCount(); - size_t GetTotalSystemResourceSize() const { return this->system_resource_num_pages * PageSize; } + size_t GetTotalSystemResourceSize() const { return m_system_resource_num_pages * PageSize; } size_t GetUsedSystemResourceSize() const { - if (this->system_resource_num_pages == 0) { + if (m_system_resource_num_pages == 0) { return 0; } - return this->dynamic_page_manager.GetUsed() * PageSize; + return m_dynamic_page_manager.GetUsed() * PageSize; } void SetRunningThread(s32 core, KThread *thread, u64 idle_count) { - this->running_threads[core] = thread; - this->running_thread_idle_counts[core] = idle_count; + m_running_threads[core] = thread; + m_running_thread_idle_counts[core] = idle_count; } void ClearRunningThread(KThread *thread) { - for (size_t i = 0; i < util::size(this->running_threads); ++i) { - if (this->running_threads[i] == thread) { - this->running_threads[i] = nullptr; + for (size_t i = 0; i < util::size(m_running_threads); ++i) { + if (m_running_threads[i] == thread) { + m_running_threads[i] = nullptr; } } } - const KDynamicPageManager &GetDynamicPageManager() const { return this->dynamic_page_manager; } - const KMemoryBlockSlabManager &GetMemoryBlockSlabManager() const { return this->memory_block_slab_manager; } - const KBlockInfoManager &GetBlockInfoManager() const { return this->block_info_manager; } - const KPageTableManager &GetPageTableManager() const { return this->page_table_manager; } + const KDynamicPageManager &GetDynamicPageManager() const { return m_dynamic_page_manager; } + const KMemoryBlockSlabManager &GetMemoryBlockSlabManager() const { return m_memory_block_slab_manager; } + const KBlockInfoManager &GetBlockInfoManager() const { return m_block_info_manager; } + const KPageTableManager &GetPageTableManager() const { return m_page_table_manager; } - constexpr KThread *GetRunningThread(s32 core) const { return this->running_threads[core]; } - constexpr u64 GetRunningThreadIdleCount(s32 core) const { return this->running_thread_idle_counts[core]; } + constexpr KThread *GetRunningThread(s32 core) const { return m_running_threads[core]; } + constexpr u64 GetRunningThreadIdleCount(s32 core) const { return m_running_thread_idle_counts[core]; } void RegisterThread(KThread *thread); void UnregisterThread(KThread *thread); @@ -324,13 +324,13 @@ namespace ams::kern { Result Reset(); void SetDebugBreak() { - if (this->state == State_RunningAttached) { + if (m_state == State_RunningAttached) { this->ChangeState(State_DebugBreak); } } void SetAttached() { - if (this->state == State_DebugBreak) { + if (m_state == State_DebugBreak) { this->ChangeState(State_RunningAttached); } } @@ -341,27 +341,27 @@ namespace ams::kern { void UnpinCurrentThread(); Result SignalToAddress(KProcessAddress address) { - return 
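
The ChangeState() pattern that closes the KProcess hunk above signals waiters at most once per real transition, because no-op transitions are filtered before the signal flag is latched. A minimal standalone model of that idiom (simplified names; Notify() stands in for NotifyAvailable(), which is elided here):

    // Sketch: a state change latches a signal and notifies exactly when the
    // state actually changes; repeated writes of the same state are no-ops.
    enum class State { Created, RunningAttached, DebugBreak, Terminated };

    struct SignalableState {
        State state       = State::Created;
        bool  is_signaled = false;

        void Notify() { /* wake waiters; elided in this sketch */ }

        void ChangeState(State new_state) {
            if (state != new_state) {   // filter no-op transitions
                state       = new_state;
                is_signaled = true;     // latched until explicitly reset
                Notify();
            }
        }
    };

SetDebugBreak() and SetAttached() above then become simple guarded calls into this helper, so a debugger attach/detach cycle produces exactly one signal per transition.
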
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp
index 0afc530d1..9eaf7e573 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp
@@ -24,19 +24,19 @@ namespace ams::kern {
     class KReadableEvent : public KSynchronizationObject {
         MESOSPHERE_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
         private:
-            bool is_signaled;
-            KEvent *parent_event;
+            bool m_is_signaled;
+            KEvent *m_parent;
         public:
-            constexpr explicit KReadableEvent() : KSynchronizationObject(), is_signaled(), parent_event() { MESOSPHERE_ASSERT_THIS(); }
+            constexpr explicit KReadableEvent() : KSynchronizationObject(), m_is_signaled(), m_parent() { MESOSPHERE_ASSERT_THIS(); }

             virtual ~KReadableEvent() { MESOSPHERE_ASSERT_THIS(); }

             constexpr void Initialize(KEvent *parent) {
                 MESOSPHERE_ASSERT_THIS();
-                this->is_signaled = false;
-                this->parent_event = parent;
+                m_is_signaled = false;
+                m_parent = parent;
             }

-            constexpr KEvent *GetParent() const { return this->parent_event; }
+            constexpr KEvent *GetParent() const { return m_parent; }

             virtual bool IsSignaled() const override;
             virtual void Destroy() override;
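
KReadableEvent::Initialize() above resets the signal state and stores a back-pointer to the owning KEvent. A hedged sketch of the intended pairing (KEvent's own layout is not part of this diff, so the wiring below is an assumption for illustration):

    // Hypothetical parent-side wiring: the owning event initializes its
    // readable half against itself, so GetParent() can route signal/clear
    // operations back to the pair.
    void InitializeEventPair(KEvent *event, KReadableEvent *readable) {
        readable->Initialize(event);   // starts unsignaled, parent recorded
        MESOSPHERE_ASSERT(readable->GetParent() == event);
    }
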
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp
index 8bfb6af05..f3e1cc67d 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp
@@ -25,15 +25,15 @@ namespace ams::kern {
     class KResourceLimit final : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit, KAutoObjectWithList> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject);
         private:
-            s64 limit_values[ams::svc::LimitableResource_Count];
-            s64 current_values[ams::svc::LimitableResource_Count];
-            s64 current_hints[ams::svc::LimitableResource_Count];
-            s64 peak_values[ams::svc::LimitableResource_Count];
-            mutable KLightLock lock;
-            s32 waiter_count;
-            KLightConditionVariable cond_var;
+            s64 m_limit_values[ams::svc::LimitableResource_Count];
+            s64 m_current_values[ams::svc::LimitableResource_Count];
+            s64 m_current_hints[ams::svc::LimitableResource_Count];
+            s64 m_peak_values[ams::svc::LimitableResource_Count];
+            mutable KLightLock m_lock;
+            s32 m_waiter_count;
+            KLightConditionVariable m_cond_var;
         public:
-            constexpr ALWAYS_INLINE KResourceLimit() : limit_values(), current_values(), current_hints(), peak_values(), lock(), waiter_count(), cond_var() { /* ... */ }
+            constexpr ALWAYS_INLINE KResourceLimit() : m_limit_values(), m_current_values(), m_current_hints(), m_peak_values(), m_lock(), m_waiter_count(), m_cond_var() { /* ... */ }
             virtual ~KResourceLimit() { /* ... */ }

             static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
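
KResourceLimit keeps four parallel arrays indexed by ams::svc::LimitableResource, guarded by m_lock, with m_cond_var used to block reservers until releases arrive. A single-threaded sketch of the bookkeeping invariant only (locking, hints, and timed waiting deliberately omitted; names are illustrative):

    #include <algorithm>
    #include <cstdint>

    // One resource's books: current never exceeds limit, and peak records
    // the high-water mark. The real Reserve() can also wait on a timeout.
    struct LimitBook {
        int64_t limit = 0, current = 0, peak = 0;

        bool Reserve(int64_t value) {
            if (current + value > limit) {
                return false;                // would exceed the cap
            }
            current += value;
            peak = std::max(peak, current);  // high-water mark
            return true;
        }

        void Release(int64_t value) { current -= value; }
    };
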
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
index e415aface..02b267ac2 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
@@ -50,35 +50,35 @@ namespace ams::kern {
             friend class KScopedSchedulerLockAndSleep;
             friend class KScopedDisableDispatch;
         private:
-            SchedulingState state;
-            bool is_active;
-            s32 core_id;
-            KThread *prev_thread;
-            s64 last_context_switch_time;
-            KThread *idle_thread;
-            std::atomic<KThread *> current_thread;
+            SchedulingState m_state;
+            bool m_is_active;
+            s32 m_core_id;
+            KThread *m_prev_thread;
+            s64 m_last_context_switch_time;
+            KThread *m_idle_thread;
+            std::atomic<KThread *> m_current_thread;
         public:
             constexpr KScheduler()
-                : state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr), current_thread(nullptr)
+                : m_state(), m_is_active(false), m_core_id(0), m_prev_thread(nullptr), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
             {
-                this->state.needs_scheduling = true;
-                this->state.interrupt_task_thread_runnable = false;
-                this->state.should_count_idle = false;
-                this->state.idle_count = 0;
-                this->state.idle_thread_stack = nullptr;
-                this->state.highest_priority_thread = nullptr;
+                m_state.needs_scheduling = true;
+                m_state.interrupt_task_thread_runnable = false;
+                m_state.should_count_idle = false;
+                m_state.idle_count = 0;
+                m_state.idle_thread_stack = nullptr;
+                m_state.highest_priority_thread = nullptr;
             }

             NOINLINE void Initialize(KThread *idle_thread);
             NOINLINE void Activate();

             ALWAYS_INLINE void SetInterruptTaskRunnable() {
-                this->state.interrupt_task_thread_runnable = true;
-                this->state.needs_scheduling = true;
+                m_state.interrupt_task_thread_runnable = true;
+                m_state.needs_scheduling = true;
             }

             ALWAYS_INLINE void RequestScheduleOnInterrupt() {
-                this->state.needs_scheduling = true;
+                m_state.needs_scheduling = true;

                 if (CanSchedule()) {
                     this->ScheduleOnInterrupt();
@@ -86,23 +86,23 @@ namespace ams::kern {
             }

             ALWAYS_INLINE u64 GetIdleCount() const {
-                return this->state.idle_count;
+                return m_state.idle_count;
             }

             ALWAYS_INLINE KThread *GetIdleThread() const {
-                return this->idle_thread;
+                return m_idle_thread;
             }

             ALWAYS_INLINE KThread *GetPreviousThread() const {
-                return this->prev_thread;
+                return m_prev_thread;
             }

             ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
-                return this->current_thread;
+                return m_current_thread;
             }

             ALWAYS_INLINE s64 GetLastContextSwitchTime() const {
-                return this->last_context_switch_time;
+                return m_last_context_switch_time;
             }
         private:
             /* Static private API. */
@@ -161,7 +161,7 @@ namespace ams::kern {
             ALWAYS_INLINE void Schedule() {
                 MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
-                MESOSPHERE_ASSERT(this->core_id == GetCurrentCoreId());
+                MESOSPHERE_ASSERT(m_core_id == GetCurrentCoreId());

                 this->ScheduleImpl();
             }
@@ -181,7 +181,7 @@ namespace ams::kern {
                 KScopedInterruptDisable intr_disable;
                 ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };

-                if (this->state.needs_scheduling) {
+                if (m_state.needs_scheduling) {
                     Schedule();
                 }
             }
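
RequestScheduleOnInterrupt() above records the request unconditionally and only performs the switch when dispatch currently allows it; the final hunk shows the re-enable path honoring a deferred needs_scheduling flag. A reduced, self-contained model of that gate (member names simplified; the real gate is the per-thread disable-dispatch count):

    // Sketch: the flag survives when an immediate reschedule is impossible,
    // so no request is ever lost.
    struct MiniScheduler {
        bool needs_scheduling = false;
        bool dispatch_enabled = true;   // stands in for disable-dispatch count == 0

        void ScheduleNow() { needs_scheduling = false; /* context switch elided */ }

        void RequestScheduleOnInterrupt() {
            needs_scheduling = true;    // always remembered
            if (dispatch_enabled) {
                ScheduleNow();
            }
        }

        void EnableDispatch() {         // deferred requests are honored here
            dispatch_enabled = true;
            if (needs_scheduling) {
                ScheduleNow();
            }
        }
    };
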
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp
index c413fcde1..0f9e27f94 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp
@@ -33,16 +33,16 @@ namespace ams::kern {
     template<typename SchedulerType> requires KSchedulerLockable<SchedulerType>
     class KAbstractSchedulerLock {
         private:
-            KAlignedSpinLock spin_lock;
-            s32 lock_count;
-            KThread *owner_thread;
+            KAlignedSpinLock m_spin_lock;
+            s32 m_lock_count;
+            KThread *m_owner_thread;
         public:
-            constexpr ALWAYS_INLINE KAbstractSchedulerLock() : spin_lock(), lock_count(0), owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }
+            constexpr ALWAYS_INLINE KAbstractSchedulerLock() : m_spin_lock(), m_lock_count(0), m_owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }

             ALWAYS_INLINE bool IsLockedByCurrentThread() const {
                 MESOSPHERE_ASSERT_THIS();

-                return this->owner_thread == GetCurrentThreadPointer();
+                return m_owner_thread == GetCurrentThreadPointer();
             }

             void Lock() {
@@ -50,36 +50,36 @@ namespace ams::kern {
                 if (this->IsLockedByCurrentThread()) {
                     /* If we already own the lock, we can just increment the count. */
-                    MESOSPHERE_ASSERT(this->lock_count > 0);
-                    this->lock_count++;
+                    MESOSPHERE_ASSERT(m_lock_count > 0);
+                    m_lock_count++;
                 } else {
                     /* Otherwise, we want to disable scheduling and acquire the spinlock. */
                     SchedulerType::DisableScheduling();
-                    this->spin_lock.Lock();
+                    m_spin_lock.Lock();

                     /* For debug, ensure that our state is valid. */
-                    MESOSPHERE_ASSERT(this->lock_count == 0);
-                    MESOSPHERE_ASSERT(this->owner_thread == nullptr);
+                    MESOSPHERE_ASSERT(m_lock_count == 0);
+                    MESOSPHERE_ASSERT(m_owner_thread == nullptr);

                     /* Increment count, take ownership. */
-                    this->lock_count = 1;
-                    this->owner_thread = GetCurrentThreadPointer();
+                    m_lock_count = 1;
+                    m_owner_thread = GetCurrentThreadPointer();
                 }
             }

             void Unlock() {
                 MESOSPHERE_ASSERT_THIS();
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-                MESOSPHERE_ASSERT(this->lock_count > 0);
+                MESOSPHERE_ASSERT(m_lock_count > 0);

                 /* Release an instance of the lock. */
-                if ((--this->lock_count) == 0) {
+                if ((--m_lock_count) == 0) {
                     /* We're no longer going to hold the lock. Take note of what cores need scheduling. */
                     const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads();

                     /* Note that we no longer hold the lock, and unlock the spinlock. */
-                    this->owner_thread = nullptr;
-                    this->spin_lock.Unlock();
+                    m_owner_thread = nullptr;
+                    m_spin_lock.Unlock();

                     /* Enable scheduling, and perform a rescheduling operation. */
                     SchedulerType::EnableScheduling(cores_needing_scheduling);
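
The Lock()/Unlock() pair above implements a recursive lock: only the outermost Lock() disables scheduling and takes the spinlock, and only the matching final Unlock() publishes the scheduler update and re-enables scheduling. Usage sketch (instantiating with KScheduler matches how the kernel aliases this lock, though that alias is not part of this hunk):

    void Example(KAbstractSchedulerLock<KScheduler> &lock) {
        lock.Lock();     // outermost: DisableScheduling() + spinlock acquire
        lock.Lock();     // same thread: m_lock_count 1 -> 2, nothing else happens
        lock.Unlock();   // 2 -> 1, lock still held
        lock.Unlock();   // 1 -> 0: UpdateHighestPriorityThreads(), spinlock
                         // release, EnableScheduling()
    }
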
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp
index 8ae0fecbc..fb7f6c2ea 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp
@@ -29,11 +29,11 @@ namespace ams::kern {
             NON_COPYABLE(KScopedLock);
             NON_MOVEABLE(KScopedLock);
         private:
-            T *lock_ptr;
+            T &m_lock;
         public:
-            explicit ALWAYS_INLINE KScopedLock(T *l) : lock_ptr(l) { this->lock_ptr->Lock(); }
-            explicit ALWAYS_INLINE KScopedLock(T &l) : KScopedLock(std::addressof(l)) { /* ... */ }
-            ALWAYS_INLINE ~KScopedLock() { this->lock_ptr->Unlock(); }
+            explicit ALWAYS_INLINE KScopedLock(T &l) : m_lock(l) { m_lock.Lock(); }
+            explicit ALWAYS_INLINE KScopedLock(T *l) : KScopedLock(*l) { /* ... */ }
+            ALWAYS_INLINE ~KScopedLock() { m_lock.Unlock(); }
     };

 }
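
This hunk flips the storage direction: the reference constructor is now primary and stores T&, with the pointer constructor delegating via *l, which removes a null-pointer state entirely. Typical RAII usage (relying on class template argument deduction; the kernel also provides aliases such as KScopedLightLock for this):

    void Example(KLightLock &lock) {
        KScopedLock lk(lock);   // Lock() runs in the constructor
        /* ... critical section ... */
    }                           // Unlock() runs in the destructor, on every exit path
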
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp
index f0979ec63..250c24020 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp
@@ -22,24 +22,24 @@ namespace ams::kern {
     class KScopedResourceReservation {
         private:
-            KResourceLimit *limit;
-            s64 value;
-            ams::svc::LimitableResource resource;
-            bool succeeded;
+            KResourceLimit *m_limit;
+            s64 m_value;
+            ams::svc::LimitableResource m_resource;
+            bool m_succeeded;
         public:
-            ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : limit(l), value(v), resource(r) {
-                if (this->limit && this->value) {
-                    this->succeeded = this->limit->Reserve(this->resource, this->value, timeout);
+            ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : m_limit(l), m_value(v), m_resource(r) {
+                if (m_limit && m_value) {
+                    m_succeeded = m_limit->Reserve(m_resource, m_value, timeout);
                 } else {
-                    this->succeeded = true;
+                    m_succeeded = true;
                 }
             }

-            ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : limit(l), value(v), resource(r) {
-                if (this->limit && this->value) {
-                    this->succeeded = this->limit->Reserve(this->resource, this->value);
+            ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : m_limit(l), m_value(v), m_resource(r) {
+                if (m_limit && m_value) {
+                    m_succeeded = m_limit->Reserve(m_resource, m_value);
                 } else {
-                    this->succeeded = true;
+                    m_succeeded = true;
                 }
             }
@@ -47,17 +47,17 @@ namespace ams::kern {
             ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v = 1) : KScopedResourceReservation(p->GetResourceLimit(), r, v) { /* ... */ }

             ALWAYS_INLINE ~KScopedResourceReservation() {
-                if (this->limit && this->value && this->succeeded) {
-                    this->limit->Release(this->resource, this->value);
+                if (m_limit && m_value && m_succeeded) {
+                    m_limit->Release(m_resource, m_value);
                 }
             }

             ALWAYS_INLINE void Commit() {
-                this->limit = nullptr;
+                m_limit = nullptr;
             }

             ALWAYS_INLINE bool Succeeded() const {
-                return this->succeeded;
+                return m_succeeded;
             }
     };
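
This class supports the kernel's reserve-then-commit idiom: reserve up front, bail out if the limit is hit, and Commit() only after the guarded operation succeeds, so every early return releases the reservation automatically in the destructor. A sketch (the specific resource and result names here are illustrative, not taken from this diff):

    Result CreateSomething(KProcess *process) {
        /* Reserve one unit of the resource from the process's limit. */
        KScopedResourceReservation reservation(process, ams::svc::LimitableResource_EventCountMax);
        R_UNLESS(reservation.Succeeded(), svc::ResultLimitReached());

        /* ... allocate and register the object; any early return on failure
           lets the destructor Release() the reservation ... */

        reservation.Commit();   // success: destructor will no longer Release()
        return ResultSuccess();
    }

Note that Commit() works by nulling m_limit, which is exactly the condition the destructor re-checks.
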
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp
index 37b1cfbf3..cfae34c11 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp
@@ -23,24 +23,24 @@ namespace ams::kern {
     class KScopedSchedulerLockAndSleep {
         private:
-            s64 timeout_tick;
-            KThread *thread;
-            KHardwareTimer *timer;
+            s64 m_timeout_tick;
+            KThread *m_thread;
+            KHardwareTimer *m_timer;
         public:
-            explicit ALWAYS_INLINE KScopedSchedulerLockAndSleep(KHardwareTimer **out_timer, KThread *t, s64 timeout) : timeout_tick(timeout), thread(t) {
+            explicit ALWAYS_INLINE KScopedSchedulerLockAndSleep(KHardwareTimer **out_timer, KThread *t, s64 timeout) : m_timeout_tick(timeout), m_thread(t) {
                 /* Lock the scheduler. */
                 KScheduler::s_scheduler_lock.Lock();

                 /* Set our timer only if the absolute time is positive. */
-                this->timer = (this->timeout_tick > 0) ? std::addressof(Kernel::GetHardwareTimer()) : nullptr;
+                m_timer = (m_timeout_tick > 0) ? std::addressof(Kernel::GetHardwareTimer()) : nullptr;

-                *out_timer = this->timer;
+                *out_timer = m_timer;
             }

             ~KScopedSchedulerLockAndSleep() {
                 /* Register the sleep. */
-                if (this->timeout_tick > 0) {
-                    this->timer->RegisterAbsoluteTask(this->thread, this->timeout_tick);
+                if (m_timeout_tick > 0) {
+                    m_timer->RegisterAbsoluteTask(m_thread, m_timeout_tick);
                 }

                 /* Unlock the scheduler. */
@@ -48,7 +48,7 @@ namespace ams::kern {
             }

             ALWAYS_INLINE void CancelSleep() {
-                this->timeout_tick = 0;
+                m_timeout_tick = 0;
             }
     };
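
Construction locks the scheduler and selects a timer only for positive timeouts; the destructor then registers the sleep unless CancelSleep() zeroed the tick first. A usage sketch modeled on the kernel's timed-wait paths (the condition test is elided and hypothetical):

    void TimedWaitSketch(KThread *cur_thread, s64 timeout_tick) {
        KHardwareTimer *timer;
        {
            KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout_tick);

            if (/* wait already satisfied, no need to sleep */ false) {
                slp.CancelSleep();   // tick = 0: destructor registers no timer task
                return;
            }
            /* ... enqueue cur_thread as a waiter under the scheduler lock ... */
        }   // destructor: RegisterAbsoluteTask(cur_thread, timeout_tick), then unlock
    }
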
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp
index e5862aa51..ef79fba36 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp
@@ -30,11 +30,11 @@ namespace ams::kern {
             using SessionList = util::IntrusiveListBaseTraits<KServerSession>::ListType;
             using LightSessionList = util::IntrusiveListBaseTraits<KLightServerSession>::ListType;
         private:
-            SessionList session_list;
-            LightSessionList light_session_list;
-            KPort *parent;
+            SessionList m_session_list;
+            LightSessionList m_light_session_list;
+            KPort *m_parent;
         public:
-            constexpr KServerPort() : session_list(), light_session_list(), parent() { /* ... */ }
+            constexpr KServerPort() : m_session_list(), m_light_session_list(), m_parent() { /* ... */ }
             virtual ~KServerPort() { /* ... */ }

             void Initialize(KPort *parent);
@@ -44,7 +44,7 @@ namespace ams::kern {
             KServerSession *AcceptSession();
             KLightServerSession *AcceptLightSession();

-            constexpr const KPort *GetParent() const { return this->parent; }
+            constexpr const KPort *GetParent() const { return m_parent; }

             bool IsLight() const;

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp
index 12979af04..307828d71 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp
@@ -28,19 +28,19 @@ namespace ams::kern {
         private:
             using RequestList = util::IntrusiveListBaseTraits<KSessionRequest>::ListType;
         private:
-            KSession *parent;
-            RequestList request_list;
-            KSessionRequest *current_request;
-            KLightLock lock;
+            KSession *m_parent;
+            RequestList m_request_list;
+            KSessionRequest *m_current_request;
+            KLightLock m_lock;
         public:
-            constexpr KServerSession() : parent(), request_list(), current_request(), lock() { /* ... */ }
+            constexpr KServerSession() : m_parent(), m_request_list(), m_current_request(), m_lock() { /* ... */ }
             virtual ~KServerSession() { /* ... */ }

             virtual void Destroy() override;

-            void Initialize(KSession *p) { this->parent = p; }
+            void Initialize(KSession *p) { m_parent = p; }

-            constexpr const KSession *GetParent() const { return this->parent; }
+            constexpr const KSession *GetParent() const { return m_parent; }

             virtual bool IsSignaled() const override;
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp
index a278391ca..c00b9dd63 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp
@@ -35,16 +35,16 @@ namespace ams::kern {
                 ServerClosed = 3,
             };
         private:
-            KServerSession server;
-            KClientSession client;
-            State state;
-            KClientPort *port;
-            uintptr_t name;
-            KProcess *process;
-            bool initialized;
+            KServerSession m_server;
+            KClientSession m_client;
+            State m_state;
+            KClientPort *m_port;
+            uintptr_t m_name;
+            KProcess *m_process;
+            bool m_initialized;
         public:
             constexpr KSession()
-                : server(), client(), state(State::Invalid), port(), name(), process(), initialized()
+                : m_server(), m_client(), m_state(State::Invalid), m_port(), m_name(), m_process(), m_initialized()
             {
                 /* ... */
             }
@@ -54,25 +54,25 @@ namespace ams::kern {
             void Initialize(KClientPort *client_port, uintptr_t name);
             virtual void Finalize() override;

-            virtual bool IsInitialized() const override { return this->initialized; }
-            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->process); }
+            virtual bool IsInitialized() const override { return m_initialized; }
+            virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_process); }

             static void PostDestroy(uintptr_t arg);

             void OnServerClosed();
             void OnClientClosed();

-            bool IsServerClosed() const { return this->state != State::Normal; }
-            bool IsClientClosed() const { return this->state != State::Normal; }
+            bool IsServerClosed() const { return m_state != State::Normal; }
+            bool IsClientClosed() const { return m_state != State::Normal; }

-            Result OnRequest(KSessionRequest *request) { return this->server.OnRequest(request); }
+            Result OnRequest(KSessionRequest *request) { return m_server.OnRequest(request); }

-            KClientSession &GetClientSession() { return this->client; }
-            KServerSession &GetServerSession() { return this->server; }
-            const KClientSession &GetClientSession() const { return this->client; }
-            const KServerSession &GetServerSession() const { return this->server; }
+            KClientSession &GetClientSession() { return m_client; }
+            KServerSession &GetServerSession() { return m_server; }
+            const KClientSession &GetClientSession() const { return m_client; }
+            const KServerSession &GetServerSession() const { return m_server; }

-            const KClientPort *GetParent() const { return this->port; }
+            const KClientPort *GetParent() const { return m_port; }
     };

 }
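
Note that IsServerClosed() and IsClientClosed() are deliberately identical: once m_state leaves State::Normal for any reason, both endpoints observe the session as closed. Only ServerClosed's value is visible in this hunk; the remaining enumerators sketched below are an assumption extrapolated for illustration:

    // Assumed lifecycle, extrapolated from "ServerClosed = 3" above:
    //   Invalid (0) --Initialize()--> Normal (1)
    //   Normal --OnClientClosed()--> ClientClosed (2, assumed)
    //   Normal --OnServerClosed()--> ServerClosed (3)
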
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp
index fa278f3af..35e4d3bfe 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp
@@ -33,38 +33,38 @@ namespace ams::kern {
                     class Mapping {
                         private:
-                            KProcessAddress client_address;
-                            KProcessAddress server_address;
-                            size_t size;
-                            KMemoryState state;
+                            KProcessAddress m_client_address;
+                            KProcessAddress m_server_address;
+                            size_t m_size;
+                            KMemoryState m_state;
                         public:
                             constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) {
-                                this->client_address = c;
-                                this->server_address = s;
-                                this->size = sz;
-                                this->state = st;
+                                m_client_address = c;
+                                m_server_address = s;
+                                m_size = sz;
+                                m_state = st;
                             }

-                            constexpr ALWAYS_INLINE KProcessAddress GetClientAddress() const { return this->client_address; }
-                            constexpr ALWAYS_INLINE KProcessAddress GetServerAddress() const { return this->server_address; }
-                            constexpr ALWAYS_INLINE size_t GetSize() const { return this->size; }
-                            constexpr ALWAYS_INLINE KMemoryState GetMemoryState() const { return this->state; }
+                            constexpr ALWAYS_INLINE KProcessAddress GetClientAddress() const { return m_client_address; }
+                            constexpr ALWAYS_INLINE KProcessAddress GetServerAddress() const { return m_server_address; }
+                            constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
+                            constexpr ALWAYS_INLINE KMemoryState GetMemoryState() const { return m_state; }
                     };
                 private:
-                    Mapping static_mappings[NumStaticMappings];
-                    Mapping *mappings;
-                    u8 num_send;
-                    u8 num_recv;
-                    u8 num_exch;
+                    Mapping m_static_mappings[NumStaticMappings];
+                    Mapping *m_mappings;
+                    u8 m_num_send;
+                    u8 m_num_recv;
+                    u8 m_num_exch;
                 public:
-                    constexpr explicit SessionMappings() : static_mappings(), mappings(), num_send(), num_recv(), num_exch() { /* ... */ }
+                    constexpr explicit SessionMappings() : m_static_mappings(), m_mappings(), m_num_send(), m_num_recv(), m_num_exch() { /* ... */ }

                    void Initialize() { /* ... */ }
                    void Finalize();

-                    constexpr ALWAYS_INLINE size_t GetSendCount() const { return this->num_send; }
-                    constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return this->num_recv; }
-                    constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return this->num_exch; }
+                    constexpr ALWAYS_INLINE size_t GetSendCount() const { return m_num_send; }
+                    constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return m_num_recv; }
+                    constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return m_num_exch; }

                    Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state);
                    Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state);
@@ -88,49 +88,49 @@ namespace ams::kern {
                    Result PushMap(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state, size_t index);

                    constexpr ALWAYS_INLINE const Mapping &GetSendMapping(size_t i) const {
-                        MESOSPHERE_ASSERT(i < this->num_send);
+                        MESOSPHERE_ASSERT(i < m_num_send);

                        const size_t index = i;
                        if (index < NumStaticMappings) {
-                            return this->static_mappings[index];
+                            return m_static_mappings[index];
                        } else {
-                            return this->mappings[index - NumStaticMappings];
+                            return m_mappings[index - NumStaticMappings];
                        }
                    }

                    constexpr ALWAYS_INLINE const Mapping &GetReceiveMapping(size_t i) const {
-                        MESOSPHERE_ASSERT(i < this->num_recv);
+                        MESOSPHERE_ASSERT(i < m_num_recv);

-                        const size_t index = this->num_send + i;
+                        const size_t index = m_num_send + i;
                        if (index < NumStaticMappings) {
-                            return this->static_mappings[index];
+                            return m_static_mappings[index];
                        } else {
-                            return this->mappings[index - NumStaticMappings];
+                            return m_mappings[index - NumStaticMappings];
                        }
                    }

                    constexpr ALWAYS_INLINE const Mapping &GetExchangeMapping(size_t i) const {
-                        MESOSPHERE_ASSERT(i < this->num_exch);
+                        MESOSPHERE_ASSERT(i < m_num_exch);

-                        const size_t index = this->num_send + this->num_recv + i;
+                        const size_t index = m_num_send + m_num_recv + i;
                        if (index < NumStaticMappings) {
-                            return this->static_mappings[index];
+                            return m_static_mappings[index];
                        } else {
-                            return this->mappings[index - NumStaticMappings];
+                            return m_mappings[index - NumStaticMappings];
                        }
                    }
            };
        private:
-            SessionMappings mappings;
-            KThread *thread;
-            KProcess *server;
-            KWritableEvent *event;
-            uintptr_t address;
-            size_t size;
+            SessionMappings m_mappings;
+            KThread *m_thread;
+            KProcess *m_server;
+            KWritableEvent *m_event;
+            uintptr_t m_address;
+            size_t m_size;
        public:
-            constexpr KSessionRequest() : mappings(), thread(), server(), event(), address(), size() { /* ... */ }
+            constexpr KSessionRequest() : m_mappings(), m_thread(), m_server(), m_event(), m_address(), m_size() { /* ... */ }
            virtual ~KSessionRequest() { /* ... */ }

            static KSessionRequest *Create() {
@@ -147,79 +147,79 @@ namespace ams::kern {
            }

            void Initialize(KWritableEvent *event, uintptr_t address, size_t size) {
-                this->mappings.Initialize();
+                m_mappings.Initialize();

-                this->thread = std::addressof(GetCurrentThread());
-                this->event = event;
-                this->address = address;
-                this->size = size;
+                m_thread = std::addressof(GetCurrentThread());
+                m_event = event;
+                m_address = address;
+                m_size = size;

-                this->thread->Open();
-                if (this->event != nullptr) {
-                    this->event->Open();
+                m_thread->Open();
+                if (m_event != nullptr) {
+                    m_event->Open();
                }
            }

            virtual void Finalize() override {
-                this->mappings.Finalize();
+                m_mappings.Finalize();

-                if (this->thread) {
-                    this->thread->Close();
+                if (m_thread) {
+                    m_thread->Close();
                }
-                if (this->event) {
-                    this->event->Close();
+                if (m_event) {
+                    m_event->Close();
                }
-                if (this->server) {
-                    this->server->Close();
+                if (m_server) {
+                    m_server->Close();
                }
            }

            static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

-            constexpr ALWAYS_INLINE KThread *GetThread() const { return this->thread; }
-            constexpr ALWAYS_INLINE KWritableEvent *GetEvent() const { return this->event; }
-            constexpr ALWAYS_INLINE uintptr_t GetAddress() const { return this->address; }
-            constexpr ALWAYS_INLINE size_t GetSize() const { return this->size; }
-            constexpr ALWAYS_INLINE KProcess *GetServerProcess() const { return this->server; }
+            constexpr ALWAYS_INLINE KThread *GetThread() const { return m_thread; }
+            constexpr ALWAYS_INLINE KWritableEvent *GetEvent() const { return m_event; }
+            constexpr ALWAYS_INLINE uintptr_t GetAddress() const { return m_address; }
+            constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
+            constexpr ALWAYS_INLINE KProcess *GetServerProcess() const { return m_server; }

            void ALWAYS_INLINE SetServerProcess(KProcess *process) {
-                this->server = process;
-                this->server->Open();
+                m_server = process;
+                m_server->Open();
            }

-            constexpr ALWAYS_INLINE void ClearThread() { this->thread = nullptr; }
-            constexpr ALWAYS_INLINE void ClearEvent() { this->event = nullptr; }
+            constexpr ALWAYS_INLINE void ClearThread() { m_thread = nullptr; }
+            constexpr ALWAYS_INLINE void ClearEvent() { m_event = nullptr; }

-            constexpr ALWAYS_INLINE size_t GetSendCount() const { return this->mappings.GetSendCount(); }
-            constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return this->mappings.GetReceiveCount(); }
-            constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return this->mappings.GetExchangeCount(); }
+            constexpr ALWAYS_INLINE size_t GetSendCount() const { return m_mappings.GetSendCount(); }
+            constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return m_mappings.GetReceiveCount(); }
+            constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return m_mappings.GetExchangeCount(); }

            ALWAYS_INLINE Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-                return this->mappings.PushSend(client, server, size, state);
+                return m_mappings.PushSend(client, server, size, state);
            }

            ALWAYS_INLINE Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-                return this->mappings.PushReceive(client, server, size, state);
+                return m_mappings.PushReceive(client, server, size, state);
            }

            ALWAYS_INLINE Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-                return this->mappings.PushExchange(client, server, size, state);
+                return m_mappings.PushExchange(client, server, size, state);
            }

-            constexpr ALWAYS_INLINE KProcessAddress GetSendClientAddress(size_t i) const { return this->mappings.GetSendClientAddress(i); }
-            constexpr ALWAYS_INLINE KProcessAddress GetSendServerAddress(size_t i) const { return this->mappings.GetSendServerAddress(i); }
-            constexpr ALWAYS_INLINE size_t GetSendSize(size_t i) const { return this->mappings.GetSendSize(i); }
-            constexpr ALWAYS_INLINE KMemoryState GetSendMemoryState(size_t i) const { return this->mappings.GetSendMemoryState(i); }
+            constexpr ALWAYS_INLINE KProcessAddress GetSendClientAddress(size_t i) const { return m_mappings.GetSendClientAddress(i); }
+            constexpr ALWAYS_INLINE KProcessAddress GetSendServerAddress(size_t i) const { return m_mappings.GetSendServerAddress(i); }
+            constexpr ALWAYS_INLINE size_t GetSendSize(size_t i) const { return m_mappings.GetSendSize(i); }
+            constexpr ALWAYS_INLINE KMemoryState GetSendMemoryState(size_t i) const { return m_mappings.GetSendMemoryState(i); }

-            constexpr ALWAYS_INLINE KProcessAddress GetReceiveClientAddress(size_t i) const { return this->mappings.GetReceiveClientAddress(i); }
-            constexpr ALWAYS_INLINE KProcessAddress GetReceiveServerAddress(size_t i) const { return this->mappings.GetReceiveServerAddress(i); }
-            constexpr ALWAYS_INLINE size_t GetReceiveSize(size_t i) const { return this->mappings.GetReceiveSize(i); }
-            constexpr ALWAYS_INLINE KMemoryState GetReceiveMemoryState(size_t i) const { return this->mappings.GetReceiveMemoryState(i); }
+            constexpr ALWAYS_INLINE KProcessAddress GetReceiveClientAddress(size_t i) const { return m_mappings.GetReceiveClientAddress(i); }
+            constexpr ALWAYS_INLINE KProcessAddress GetReceiveServerAddress(size_t i) const { return m_mappings.GetReceiveServerAddress(i); }
+            constexpr ALWAYS_INLINE size_t GetReceiveSize(size_t i) const { return m_mappings.GetReceiveSize(i); }
+            constexpr ALWAYS_INLINE KMemoryState GetReceiveMemoryState(size_t i) const { return m_mappings.GetReceiveMemoryState(i); }

-            constexpr ALWAYS_INLINE KProcessAddress GetExchangeClientAddress(size_t i) const { return this->mappings.GetExchangeClientAddress(i); }
-            constexpr ALWAYS_INLINE KProcessAddress GetExchangeServerAddress(size_t i) const { return this->mappings.GetExchangeServerAddress(i); }
-            constexpr ALWAYS_INLINE size_t GetExchangeSize(size_t i) const { return this->mappings.GetExchangeSize(i); }
-            constexpr ALWAYS_INLINE KMemoryState GetExchangeMemoryState(size_t i) const { return this->mappings.GetExchangeMemoryState(i); }
+            constexpr ALWAYS_INLINE KProcessAddress GetExchangeClientAddress(size_t i) const { return m_mappings.GetExchangeClientAddress(i); }
+            constexpr ALWAYS_INLINE KProcessAddress GetExchangeServerAddress(size_t i) const { return m_mappings.GetExchangeServerAddress(i); }
+            constexpr ALWAYS_INLINE size_t GetExchangeSize(size_t i) const { return m_mappings.GetExchangeSize(i); }
+            constexpr ALWAYS_INLINE KMemoryState GetExchangeMemoryState(size_t i) const { return m_mappings.GetExchangeMemoryState(i); }
     };

 }
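
The three Get*Mapping() accessors share one logical index space: sends occupy [0, num_send), receives follow, exchanges come last, and only indices beyond NumStaticMappings spill into the dynamically allocated m_mappings array. A worked example (all counts assumed for illustration; NumStaticMappings' real value is defined elsewhere in this header):

    // Assume NumStaticMappings == 8, m_num_send == 6, m_num_recv == 4.
    //   send     i=2 -> index 2              -> m_static_mappings[2]
    //   receive  i=1 -> index 6 + 1 = 7      -> m_static_mappings[7]
    //   receive  i=3 -> index 6 + 3 = 9      -> m_mappings[9 - 8]  == m_mappings[1]
    //   exchange i=0 -> index 6 + 4 + 0 = 10 -> m_mappings[10 - 8] == m_mappings[2]
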
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp
index 9bbd33a83..d69c22e6a 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp
@@ -27,16 +27,16 @@ namespace ams::kern {
     class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
         private:
-            KPageGroup page_group;
-            KResourceLimit *resource_limit;
-            u64 owner_process_id;
-            ams::svc::MemoryPermission owner_perm;
-            ams::svc::MemoryPermission remote_perm;
-            bool is_initialized;
+            KPageGroup m_page_group;
+            KResourceLimit *m_resource_limit;
+            u64 m_owner_process_id;
+            ams::svc::MemoryPermission m_owner_perm;
+            ams::svc::MemoryPermission m_remote_perm;
+            bool m_is_initialized;
         public:
             explicit KSharedMemory()
-                : page_group(std::addressof(Kernel::GetBlockInfoManager())), resource_limit(nullptr), owner_process_id(std::numeric_limits<u64>::max()),
-                  owner_perm(ams::svc::MemoryPermission_None), remote_perm(ams::svc::MemoryPermission_None), is_initialized(false)
+                : m_page_group(std::addressof(Kernel::GetBlockInfoManager())), m_resource_limit(nullptr), m_owner_process_id(std::numeric_limits<u64>::max()),
+                  m_owner_perm(ams::svc::MemoryPermission_None), m_remote_perm(ams::svc::MemoryPermission_None), m_is_initialized(false)
             {
                 /* ... */
             }
@@ -46,14 +46,14 @@ namespace ams::kern {
             Result Initialize(KProcess *owner, size_t size, ams::svc::MemoryPermission own_perm, ams::svc::MemoryPermission rem_perm);
             virtual void Finalize() override;

-            virtual bool IsInitialized() const override { return this->is_initialized; }
+            virtual bool IsInitialized() const override { return m_is_initialized; }
             static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

             Result Map(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process, ams::svc::MemoryPermission map_perm);
             Result Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process);

-            u64 GetOwnerProcessId() const { return this->owner_process_id; }
-            size_t GetSize() const { return this->page_group.GetNumPages() * PageSize; }
+            u64 GetOwnerProcessId() const { return m_owner_process_id; }
+            size_t GetSize() const { return m_page_group.GetNumPages() * PageSize; }
     };

 }
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp
index 519cf5aa1..26cd1f3bf 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp
@@ -23,30 +23,30 @@ namespace ams::kern {
     class KSharedMemoryInfo : public KSlabAllocated<KSharedMemoryInfo>, public util::IntrusiveListBaseNode<KSharedMemoryInfo> {
         private:
-            KSharedMemory *shared_memory;
-            size_t reference_count;
+            KSharedMemory *m_shared_memory;
+            size_t m_reference_count;
         public:
-            constexpr KSharedMemoryInfo() : shared_memory(), reference_count() { /* ... */ }
+            constexpr KSharedMemoryInfo() : m_shared_memory(), m_reference_count() { /* ... */ }
             ~KSharedMemoryInfo() { /* ... */ }

             constexpr void Initialize(KSharedMemory *m) {
                 MESOSPHERE_ASSERT_THIS();
-                this->shared_memory = m;
-                this->reference_count = 0;
+                m_shared_memory = m;
+                m_reference_count = 0;
             }

             constexpr void Open() {
-                const size_t ref_count = ++this->reference_count;
+                const size_t ref_count = ++m_reference_count;
                 MESOSPHERE_ASSERT(ref_count > 0);
             }

             constexpr bool Close() {
-                MESOSPHERE_ASSERT(this->reference_count > 0);
-                return (--this->reference_count) == 0;
+                MESOSPHERE_ASSERT(m_reference_count > 0);
+                return (--m_reference_count) == 0;
             }

-            constexpr KSharedMemory *GetSharedMemory() const { return this->shared_memory; }
-            constexpr size_t GetReferenceCount() const { return this->reference_count; }
+            constexpr KSharedMemory *GetSharedMemory() const { return m_shared_memory; }
+            constexpr size_t GetReferenceCount() const { return m_reference_count; }
     };

 }
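
KSharedMemoryInfo is a per-process bookkeeping node: Open() counts another mapping of the same shared memory in that process, and Close() reports when the last one goes away so the caller can unlink and free the node. Sketch of the intended call pattern (the surrounding list handling is elided and hypothetical):

    void OnMap(KSharedMemoryInfo *info) {
        info->Open();                    // one more mapping in this process
    }

    void OnUnmap(KSharedMemoryInfo *info) {
        if (info->Close()) {             // true exactly when the count hits zero
            /* unlink the node from the process's info list and free it */
        }
    }
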
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp
index 0dfc84814..d3eb06678 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp
@@ -44,28 +44,28 @@ namespace ams::kern {
                 Node *next;
             };
         private:
-            Node * head;
-            size_t obj_size;
+            Node * m_head;
+            size_t m_obj_size;
         public:
-            constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { MESOSPHERE_ASSERT_THIS(); }
+            constexpr KSlabHeapImpl() : m_head(nullptr), m_obj_size(0) { MESOSPHERE_ASSERT_THIS(); }

             void Initialize(size_t size) {
-                MESOSPHERE_INIT_ABORT_UNLESS(this->head == nullptr);
-                this->obj_size = size;
+                MESOSPHERE_INIT_ABORT_UNLESS(m_head == nullptr);
+                m_obj_size = size;
             }

             Node *GetHead() const {
-                return this->head;
+                return m_head;
             }

             size_t GetObjectSize() const {
-                return this->obj_size;
+                return m_obj_size;
             }

             void *Allocate() {
                 MESOSPHERE_ASSERT_THIS();

-                return AllocateFromSlabAtomic(std::addressof(this->head));
+                return AllocateFromSlabAtomic(std::addressof(m_head));
             }

             void Free(void *obj) {
@@ -73,7 +73,7 @@ namespace ams::kern {
                 Node *node = reinterpret_cast<Node *>(obj);

-                return FreeToSlabAtomic(std::addressof(this->head), node);
+                return FreeToSlabAtomic(std::addressof(m_head), node);
             }
     };

@@ -85,22 +85,22 @@ namespace ams::kern {
         private:
             using Impl = impl::KSlabHeapImpl;
         private:
-            Impl impl;
-            uintptr_t peak;
-            uintptr_t start;
-            uintptr_t end;
+            Impl m_impl;
+            uintptr_t m_peak;
+            uintptr_t m_start;
+            uintptr_t m_end;
         private:
             ALWAYS_INLINE Impl *GetImpl() {
-                return std::addressof(this->impl);
+                return std::addressof(m_impl);
             }
             ALWAYS_INLINE const Impl *GetImpl() const {
-                return std::addressof(this->impl);
+                return std::addressof(m_impl);
             }
         public:
-            constexpr KSlabHeapBase() : impl(), peak(0), start(0), end(0) { MESOSPHERE_ASSERT_THIS(); }
+            constexpr KSlabHeapBase() : m_impl(), m_peak(0), m_start(0), m_end(0) { MESOSPHERE_ASSERT_THIS(); }

             ALWAYS_INLINE bool Contains(uintptr_t address) const {
-                return this->start <= address && address < this->end;
+                return m_start <= address && address < m_end;
             }

             void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) {
@@ -114,12 +114,12 @@ namespace ams::kern {
                 /* Set our tracking variables. */
                 const size_t num_obj = (memory_size / obj_size);
-                this->start = reinterpret_cast<uintptr_t>(memory);
-                this->end = this->start + num_obj * obj_size;
-                this->peak = this->start;
+                m_start = reinterpret_cast<uintptr_t>(memory);
+                m_end = m_start + num_obj * obj_size;
+                m_peak = m_start;

                 /* Free the objects. */
-                u8 *cur = reinterpret_cast<u8 *>(this->end);
+                u8 *cur = reinterpret_cast<u8 *>(m_end);

                 for (size_t i = 0; i < num_obj; i++) {
                     cur -= obj_size;
@@ -128,7 +128,7 @@
             }

             size_t GetSlabHeapSize() const {
-                return (this->end - this->start) / this->GetObjectSize();
+                return (m_end - m_start) / this->GetObjectSize();
             }

             size_t GetObjectSize() const {
@@ -144,10 +144,10 @@
                 #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
                 if (AMS_LIKELY(obj != nullptr)) {
                     static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
-                    std::atomic_ref<uintptr_t> peak_ref(this->peak);
+                    std::atomic_ref<uintptr_t> peak_ref(m_peak);

                     const uintptr_t alloc_peak = reinterpret_cast<uintptr_t>(obj) + this->GetObjectSize();
-                    uintptr_t cur_peak = this->peak;
+                    uintptr_t cur_peak = m_peak;
                     do {
                         if (alloc_peak <= cur_peak) {
                             break;
@@ -169,15 +169,15 @@
             }

             size_t GetObjectIndexImpl(const void *obj) const {
-                return (reinterpret_cast<uintptr_t>(obj) - this->start) / this->GetObjectSize();
+                return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
             }

             size_t GetPeakIndex() const {
-                return this->GetObjectIndexImpl(reinterpret_cast<const void *>(this->peak));
+                return this->GetObjectIndexImpl(reinterpret_cast<const void *>(m_peak));
             }

             uintptr_t GetSlabHeapAddress() const {
-                return this->start;
+                return m_start;
             }

             size_t GetNumRemaining() const {
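
The slab allocator above threads its free list through the freed objects themselves: each free object's storage is reused as a Node link, which is why InitializeImpl() can walk backward from m_end freeing every slot. A simplified single-threaded model of allocate/free; the real implementation does this with the atomic helpers named above (AllocateFromSlabAtomic/FreeToSlabAtomic):

    struct Node { Node *next; };

    void *AllocateSketch(Node **head) {
        Node *n = *head;
        if (n != nullptr) {
            *head = n->next;                 // pop the first free object
        }
        return n;                            // nullptr when the slab is exhausted
    }

    void FreeSketch(Node **head, void *obj) {
        Node *n = static_cast<Node *>(obj);  // reuse the object's storage as a link
        n->next = *head;                     // push
        *head   = n;
    }
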
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp
index f6a0da9ef..f1bf59197 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp
@@ -30,10 +30,10 @@ namespace ams::kern {
                 KThread *thread;
             };
         private:
-            ThreadListNode *thread_list_head;
-            ThreadListNode *thread_list_tail;
+            ThreadListNode *m_thread_list_head;
+            ThreadListNode *m_thread_list_tail;
         protected:
-            constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), thread_list_head(), thread_list_tail() { MESOSPHERE_ASSERT_THIS(); }
+            constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), m_thread_list_head(), m_thread_list_tail() { MESOSPHERE_ASSERT_THIS(); }
             virtual ~KSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); }

             virtual void OnFinalizeSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); }
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
index 6f9fb4b9c..b985c6537 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
@@ -96,44 +96,44 @@ namespace ams::kern {
             struct QueueEntry {
                 private:
-                    KThread *prev;
-                    KThread *next;
+                    KThread *m_prev;
+                    KThread *m_next;
                 public:
-                    constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ }
+                    constexpr QueueEntry() : m_prev(nullptr), m_next(nullptr) { /* ... */ }

                     constexpr void Initialize() {
-                        this->prev = nullptr;
-                        this->next = nullptr;
+                        m_prev = nullptr;
+                        m_next = nullptr;
                     }

-                    constexpr KThread *GetPrev() const { return this->prev; }
-                    constexpr KThread *GetNext() const { return this->next; }
-                    constexpr void SetPrev(KThread *t) { this->prev = t; }
-                    constexpr void SetNext(KThread *t) { this->next = t; }
+                    constexpr KThread *GetPrev() const { return m_prev; }
+                    constexpr KThread *GetNext() const { return m_next; }
+                    constexpr void SetPrev(KThread *t) { m_prev = t; }
+                    constexpr void SetNext(KThread *t) { m_next = t; }
             };

             using WaiterList = util::IntrusiveListBaseTraits<KThread>::ListType;
         private:
             static constexpr size_t PriorityInheritanceCountMax = 10;

             union SyncObjectBuffer {
-                KSynchronizationObject *sync_objects[ams::svc::ArgumentHandleCountMax];
-                ams::svc::Handle handles[ams::svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))];
+                KSynchronizationObject *m_sync_objects[ams::svc::ArgumentHandleCountMax];
+                ams::svc::Handle m_handles[ams::svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))];

-                constexpr SyncObjectBuffer() : sync_objects() { /* ... */ }
+                constexpr SyncObjectBuffer() : m_sync_objects() { /* ... */ }
             };
-            static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
+            static_assert(sizeof(SyncObjectBuffer::m_sync_objects) == sizeof(SyncObjectBuffer::m_handles));

             struct ConditionVariableComparator {
                 struct LightCompareType {
-                    uintptr_t cv_key;
-                    s32 priority;
+                    uintptr_t m_cv_key;
+                    s32 m_priority;

                     constexpr ALWAYS_INLINE uintptr_t GetConditionVariableKey() const {
-                        return this->cv_key;
+                        return m_cv_key;
                     }

                     constexpr ALWAYS_INLINE s32 GetPriority() const {
-                        return this->priority;
+                        return m_priority;
                     }
                 };
@@ -158,65 +158,65 @@ namespace ams::kern {
         private:
             static inline std::atomic<u64> s_next_thread_id = 0;
         private:
-            alignas(16) KThreadContext thread_context{};
-            util::IntrusiveListNode process_list_node{};
-            util::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
-            s32 priority{};
+            alignas(16) KThreadContext m_thread_context{};
+            util::IntrusiveListNode m_process_list_node{};
+            util::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
+            s32 m_priority{};

-            using ConditionVariableThreadTreeTraits = util::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&KThread::condvar_arbiter_tree_node>;
+            using ConditionVariableThreadTreeTraits = util::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&KThread::m_condvar_arbiter_tree_node>;
             using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType;

-            ConditionVariableThreadTree *condvar_tree{};
-            uintptr_t condvar_key{};
-            u64 virtual_affinity_mask{};
-            KAffinityMask physical_affinity_mask{};
-            u64 thread_id{};
-            std::atomic<s64> cpu_time{};
-            KSynchronizationObject *synced_object{};
-            KProcessAddress address_key{};
-            KProcess *parent{};
-            void *kernel_stack_top{};
-            u32 *light_ipc_data{};
-            KProcessAddress tls_address{};
-            void *tls_heap_address{};
-            KLightLock activity_pause_lock{};
-            SyncObjectBuffer sync_object_buffer{};
-            s64 schedule_count{};
-            s64 last_scheduled_tick{};
-            QueueEntry per_core_priority_queue_entry[cpu::NumCores]{};
-            KLightLock *waiting_lock{};
+            ConditionVariableThreadTree *m_condvar_tree{};
+            uintptr_t m_condvar_key{};
+            u64 m_virtual_affinity_mask{};
+            KAffinityMask m_physical_affinity_mask{};
+            u64 m_thread_id{};
+            std::atomic<s64> m_cpu_time{};
+            KSynchronizationObject *m_synced_object{};
+            KProcessAddress m_address_key{};
+            KProcess *m_parent{};
+            void *m_kernel_stack_top{};
+            u32 *m_light_ipc_data{};
+            KProcessAddress m_tls_address{};
+            void *m_tls_heap_address{};
+            KLightLock m_activity_pause_lock{};
+            SyncObjectBuffer m_sync_object_buffer{};
+            s64 m_schedule_count{};
+            s64 m_last_scheduled_tick{};
+            QueueEntry m_per_core_priority_queue_entry[cpu::NumCores]{};
+            KLightLock *m_waiting_lock{};

-            KThreadQueue *sleeping_queue{};
+            KThreadQueue *m_sleeping_queue{};

-            WaiterList waiter_list{};
-            WaiterList pinned_waiter_list{};
-            KThread *lock_owner{};
-            uintptr_t debug_params[3]{};
-            u32 address_key_value{};
-            u32 suspend_request_flags{};
-            u32 suspend_allowed_flags{};
-            Result wait_result;
-            Result debug_exception_result;
-            s32 base_priority{};
-            s32 physical_ideal_core_id{};
-            s32 virtual_ideal_core_id{};
-            s32 num_kernel_waiters{};
-            s32 current_core_id{};
-            s32 core_id{};
-            KAffinityMask original_physical_affinity_mask{};
-            s32 original_physical_ideal_core_id{};
-            s32 num_core_migration_disables{};
-            ThreadState thread_state{};
-            std::atomic<bool> termination_requested{};
-            bool wait_cancelled{};
-            bool cancellable{};
-            bool signaled{};
-            bool initialized{};
-            bool debug_attached{};
-            s8 priority_inheritance_count{};
-            bool resource_limit_release_hint{};
+            WaiterList m_waiter_list{};
+            WaiterList m_pinned_waiter_list{};
+            KThread *m_lock_owner{};
+            uintptr_t m_debug_params[3]{};
+            u32 m_address_key_value{};
+            u32 m_suspend_request_flags{};
+            u32 m_suspend_allowed_flags{};
+            Result m_wait_result;
+            Result m_debug_exception_result;
+            s32 m_base_priority{};
+            s32 m_physical_ideal_core_id{};
+            s32 m_virtual_ideal_core_id{};
+            s32 m_num_kernel_waiters{};
+            s32 m_current_core_id{};
+            s32 m_core_id{};
+            KAffinityMask m_original_physical_affinity_mask{};
+            s32 m_original_physical_ideal_core_id{};
+            s32 m_num_core_migration_disables{};
+            ThreadState m_thread_state{};
+            std::atomic<bool> m_termination_requested{};
+            bool m_wait_cancelled{};
+            bool m_cancellable{};
+            bool m_signaled{};
+            bool m_initialized{};
+            bool m_debug_attached{};
+            s8 m_priority_inheritance_count{};
+            bool m_resource_limit_release_hint{};
         public:
-            constexpr KThread() : wait_result(svc::ResultNoSynchronizationObject()), debug_exception_result(ResultSuccess()) { /* ... */ }
+            constexpr KThread() : m_wait_result(svc::ResultNoSynchronizationObject()), m_debug_exception_result(ResultSuccess()) { /* ... */ }
             virtual ~KThread() { /* ... */ }
@@ -240,15 +240,15 @@ namespace ams::kern {
             static void ResumeThreadsSuspendedForInit();
         private:
             StackParameters &GetStackParameters() {
-                return *(reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1);
+                return *(reinterpret_cast<StackParameters *>(m_kernel_stack_top) - 1);
             }

             const StackParameters &GetStackParameters() const {
-                return *(reinterpret_cast<const StackParameters *>(this->kernel_stack_top) - 1);
+                return *(reinterpret_cast<const StackParameters *>(m_kernel_stack_top) - 1);
             }
         public:
             StackParameters &GetStackParametersForExceptionSvcPermission() {
-                return *(reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1);
+                return *(reinterpret_cast<StackParameters *>(m_kernel_stack_top) - 1);
             }
         public:
             ALWAYS_INLINE s32 GetDisableDispatchCount() const {
@@ -272,15 +272,15 @@
             void Unpin();

             ALWAYS_INLINE void SaveDebugParams(uintptr_t param1, uintptr_t param2, uintptr_t param3) {
-                this->debug_params[0] = param1;
-                this->debug_params[1] = param2;
-                this->debug_params[2] = param3;
+                m_debug_params[0] = param1;
+                m_debug_params[1] = param2;
+                m_debug_params[2] = param3;
             }

             ALWAYS_INLINE void RestoreDebugParams(uintptr_t *param1, uintptr_t *param2, uintptr_t *param3) {
-                *param1 = this->debug_params[0];
-                *param2 = this->debug_params[1];
-                *param3 = this->debug_params[2];
+                *param1 = m_debug_params[0];
+                *param2 = m_debug_params[1];
+                *param3 = m_debug_params[2];
             }

             NOINLINE void DisableCoreMigration();
@@ -336,157 +336,157 @@
             void StartTermination();
             void FinishTermination();
         public:
-            constexpr u64 GetThreadId() const { return this->thread_id; }
+            constexpr u64 GetThreadId() const { return m_thread_id; }

-            constexpr KThreadContext &GetContext() { return this->thread_context; }
-            constexpr const KThreadContext &GetContext() const { return this->thread_context; }
+            constexpr KThreadContext &GetContext() { return m_thread_context; }
+            constexpr const KThreadContext &GetContext() const { return m_thread_context; }

-            constexpr u64 GetVirtualAffinityMask() const { return this->virtual_affinity_mask; }
-            constexpr const KAffinityMask &GetAffinityMask() const { return this->physical_affinity_mask; }
+            constexpr u64 GetVirtualAffinityMask() const { return m_virtual_affinity_mask; }
+            constexpr const KAffinityMask &GetAffinityMask() const { return m_physical_affinity_mask; }

             Result GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
             Result SetCoreMask(int32_t ideal_core, u64 affinity_mask);

             Result GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);

-            constexpr ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
-            constexpr ThreadState GetRawState() const { return this->thread_state; }
+            constexpr ThreadState GetState() const { return static_cast<ThreadState>(m_thread_state & ThreadState_Mask); }
+            constexpr ThreadState GetRawState() const { return m_thread_state; }

             NOINLINE void SetState(ThreadState state);

             NOINLINE KThreadContext *GetContextForSchedulerLoop();

-            constexpr uintptr_t GetConditionVariableKey() const { return this->condvar_key; }
-            constexpr uintptr_t GetAddressArbiterKey() const { return this->condvar_key; }
+            constexpr uintptr_t GetConditionVariableKey() const { return m_condvar_key; }
+            constexpr uintptr_t GetAddressArbiterKey() const { return m_condvar_key; }

             constexpr void SetConditionVariable(ConditionVariableThreadTree *tree, KProcessAddress address, uintptr_t cv_key, u32 value) {
-                this->condvar_tree = tree;
-                this->condvar_key = cv_key;
-                this->address_key = address;
-                this->address_key_value = value;
+                m_condvar_tree = tree;
+                m_condvar_key = cv_key;
+                m_address_key = address;
+                m_address_key_value = value;
             }

             constexpr void ClearConditionVariable() {
-                this->condvar_tree = nullptr;
+                m_condvar_tree = nullptr;
             }

             constexpr bool IsWaitingForConditionVariable() const {
-                return this->condvar_tree != nullptr;
+                return m_condvar_tree != nullptr;
             }

             constexpr void SetAddressArbiter(ConditionVariableThreadTree *tree, uintptr_t address) {
-                this->condvar_tree = tree;
-                this->condvar_key = address;
+                m_condvar_tree = tree;
+                m_condvar_key = address;
             }

             constexpr void ClearAddressArbiter() {
-                this->condvar_tree = nullptr;
+                m_condvar_tree = nullptr;
             }

             constexpr bool IsWaitingForAddressArbiter() const {
-                return this->condvar_tree != nullptr;
+                return m_condvar_tree != nullptr;
             }

-            constexpr s32 GetIdealVirtualCore() const { return this->virtual_ideal_core_id; }
-            constexpr s32 GetIdealPhysicalCore() const { return this->physical_ideal_core_id; }
+            constexpr s32 GetIdealVirtualCore() const { return m_virtual_ideal_core_id; }
+            constexpr s32 GetIdealPhysicalCore() const { return m_physical_ideal_core_id; }

-            constexpr s32 GetActiveCore() const { return this->core_id; }
-            constexpr void SetActiveCore(s32 core) { this->core_id = core; }
+            constexpr s32 GetActiveCore() const { return m_core_id; }
+            constexpr void SetActiveCore(s32 core) { m_core_id = core; }

-            constexpr ALWAYS_INLINE s32 GetCurrentCore() const { return this->current_core_id; }
-            constexpr void SetCurrentCore(s32 core) { this->current_core_id = core; }
+            constexpr ALWAYS_INLINE s32 GetCurrentCore() const { return m_current_core_id; }
+            constexpr void SetCurrentCore(s32 core) { m_current_core_id = core; }

-            constexpr s32 GetPriority() const { return this->priority; }
-            constexpr void SetPriority(s32 prio) { this->priority = prio; }
+            constexpr s32 GetPriority() const { return m_priority; }
+            constexpr void SetPriority(s32 prio) { m_priority = prio; }

-            constexpr s32 GetBasePriority() const { return this->base_priority; }
+            constexpr s32 GetBasePriority() const { return m_base_priority; }

-            constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; }
-            constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; }
+            constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return m_per_core_priority_queue_entry[core]; }
+            constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return m_per_core_priority_queue_entry[core]; }

-            constexpr void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; }
+            constexpr void SetSleepingQueue(KThreadQueue *q) { m_sleeping_queue = q; }

-            constexpr ConditionVariableThreadTree *GetConditionVariableTree() const { return this->condvar_tree; }
+            constexpr ConditionVariableThreadTree *GetConditionVariableTree() const { return m_condvar_tree; }

-            constexpr s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; }
+            constexpr s32 GetNumKernelWaiters() const { return m_num_kernel_waiters; }

             void AddWaiter(KThread *thread);
             void RemoveWaiter(KThread *thread);
             KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key);

-            constexpr KProcessAddress GetAddressKey() const { return this->address_key; }
-            constexpr u32 GetAddressKeyValue() const { return this->address_key_value; }
-            constexpr void SetAddressKey(KProcessAddress key) { this->address_key = key; }
-            constexpr void SetAddressKey(KProcessAddress key, u32 val) { this->address_key = key; this->address_key_value = val; }
+            constexpr KProcessAddress GetAddressKey() const { return m_address_key; }
+            constexpr u32 GetAddressKeyValue() const { return m_address_key_value; }
+            constexpr void SetAddressKey(KProcessAddress key) { m_address_key = key; }
+            constexpr void SetAddressKey(KProcessAddress key, u32 val) { m_address_key = key; m_address_key_value = val; }

-            constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; }
-            constexpr KThread *GetLockOwner() const { return this->lock_owner; }
+            constexpr void SetLockOwner(KThread *owner) { m_lock_owner = owner; }
+            constexpr KThread *GetLockOwner() const { return m_lock_owner; }

             constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) {
                 MESOSPHERE_ASSERT_THIS();

-                this->synced_object = obj;
-                this->wait_result = wait_res;
+                m_synced_object = obj;
+                m_wait_result = wait_res;
             }

             constexpr Result GetWaitResult(KSynchronizationObject **out) const {
                 MESOSPHERE_ASSERT_THIS();

-                *out = this->synced_object;
-                return this->wait_result;
+                *out = m_synced_object;
+                return m_wait_result;
             }

             constexpr void SetDebugExceptionResult(Result result) {
                 MESOSPHERE_ASSERT_THIS();
-                this->debug_exception_result = result;
+                m_debug_exception_result = result;
             }

             constexpr Result GetDebugExceptionResult() const {
                 MESOSPHERE_ASSERT_THIS();
-                return this->debug_exception_result;
+                return m_debug_exception_result;
             }

             void WaitCancel();

-            bool IsWaitCancelled() const { return this->wait_cancelled; }
-            void ClearWaitCancelled() { this->wait_cancelled = false; }
+            bool IsWaitCancelled() const { return m_wait_cancelled; }
+            void ClearWaitCancelled() { m_wait_cancelled = false; }

-            void ClearCancellable() { this->cancellable = false; }
-            void SetCancellable() { this->cancellable = true; }
+            void ClearCancellable() { m_cancellable = false; }
+            void SetCancellable() { m_cancellable = true; }

-            constexpr u32 *GetLightSessionData() const { return this->light_ipc_data; }
-            constexpr void SetLightSessionData(u32 *data) { this->light_ipc_data = data; }
+            constexpr u32 *GetLightSessionData() const { return m_light_ipc_data; }
+            constexpr void SetLightSessionData(u32 *data) { m_light_ipc_data = data; }

-            bool HasWaiters() const { return !this->waiter_list.empty(); }
+            bool HasWaiters() const { return !m_waiter_list.empty(); }

-            constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; }
-            constexpr void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; }
+            constexpr s64 GetLastScheduledTick() const { return m_last_scheduled_tick; }
+            constexpr void SetLastScheduledTick(s64 tick) { m_last_scheduled_tick = tick; }

-            constexpr s64 GetYieldScheduleCount() const { return this->schedule_count; }
-            constexpr void SetYieldScheduleCount(s64 count) { this->schedule_count = count; }
+            constexpr s64 GetYieldScheduleCount() const { return m_schedule_count; }
+            constexpr void SetYieldScheduleCount(s64 count) { m_schedule_count = count; }

-            constexpr KProcess *GetOwnerProcess() const { return this->parent; }
-            constexpr bool IsUserThread() const { return this->parent != nullptr; }
+            constexpr KProcess *GetOwnerProcess() const { return m_parent; }
+            constexpr bool IsUserThread() const { return m_parent != nullptr; }

-            constexpr KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; }
-            constexpr void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; }
+            constexpr KProcessAddress GetThreadLocalRegionAddress() const { return m_tls_address; }
+            constexpr void *GetThreadLocalRegionHeapAddress() const { return m_tls_heap_address; }

-            constexpr KSynchronizationObject
**GetSynchronizationObjectBuffer() { return std::addressof(this->sync_object_buffer.sync_objects[0]); } - constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(this->sync_object_buffer.handles[sizeof(this->sync_object_buffer.sync_objects) / sizeof(ams::svc::Handle) - ams::svc::ArgumentHandleCountMax]); } + constexpr KSynchronizationObject **GetSynchronizationObjectBuffer() { return std::addressof(m_sync_object_buffer.m_sync_objects[0]); } + constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(m_sync_object_buffer.m_handles[sizeof(m_sync_object_buffer.m_sync_objects) / sizeof(ams::svc::Handle) - ams::svc::ArgumentHandleCountMax]); } - u16 GetUserDisableCount() const { return static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->disable_count; } - void SetInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 1; } - void ClearInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 0; } + u16 GetUserDisableCount() const { return static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->disable_count; } + void SetInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->interrupt_flag = 1; } + void ClearInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->interrupt_flag = 0; } - constexpr void SetDebugAttached() { this->debug_attached = true; } - constexpr bool IsAttachedToDebugger() const { return this->debug_attached; } + constexpr void SetDebugAttached() { m_debug_attached = true; } + constexpr bool IsAttachedToDebugger() const { return m_debug_attached; } void AddCpuTime(s32 core_id, s64 amount) { - this->cpu_time += amount; + m_cpu_time += amount; /* TODO: Debug kernels track per-core tick counts. Should we? */ MESOSPHERE_UNUSED(core_id); } - s64 GetCpuTime() const { return this->cpu_time; } + s64 GetCpuTime() const { return m_cpu_time; } s64 GetCpuTime(s32 core_id) const { MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores)); @@ -495,10 +495,10 @@ namespace ams::kern { return 0; } - constexpr u32 GetSuspendFlags() const { return this->suspend_allowed_flags & this->suspend_request_flags; } + constexpr u32 GetSuspendFlags() const { return m_suspend_allowed_flags & m_suspend_request_flags; } constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; } - constexpr bool IsSuspendRequested(SuspendType type) const { return (this->suspend_request_flags & (1u << (ThreadState_SuspendShift + type))) != 0; } - constexpr bool IsSuspendRequested() const { return this->suspend_request_flags != 0; } + constexpr bool IsSuspendRequested(SuspendType type) const { return (m_suspend_request_flags & (1u << (ThreadState_SuspendShift + type))) != 0; } + constexpr bool IsSuspendRequested() const { return m_suspend_request_flags != 0; } void RequestSuspend(SuspendType type); void Resume(SuspendType type); void TrySuspend(); @@ -526,11 +526,11 @@ namespace ams::kern { Result Sleep(s64 timeout); - ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1; } - ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; } + ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(m_kernel_stack_top) - 1; } + ALWAYS_INLINE void *GetKernelStackTop() const { return m_kernel_stack_top; } ALWAYS_INLINE bool IsTerminationRequested() const { - return this->termination_requested || this->GetRawState() == ThreadState_Terminated; + return m_termination_requested || this->GetRawState() == ThreadState_Terminated; } size_t GetKernelStackUsage() const; @@ -538,8 +538,8 @@ namespace
ams::kern { /* Overridden parent functions. */ virtual u64 GetId() const override final { return this->GetThreadId(); } - virtual bool IsInitialized() const override { return this->initialized; } - virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->parent) | (this->resource_limit_release_hint ? 1 : 0); } + virtual bool IsInitialized() const override { return m_initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 1 : 0); } static void PostDestroy(uintptr_t arg); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp index 8670bdb64..a41c49694 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp @@ -29,19 +29,19 @@ namespace ams::kern { static constexpr size_t RegionsPerPage = PageSize / ams::svc::ThreadLocalRegionSize; static_assert(RegionsPerPage > 0); private: - KProcessAddress virt_addr; - KProcess *owner; - bool is_region_free[RegionsPerPage]; + KProcessAddress m_virt_addr; + KProcess *m_owner; + bool m_is_region_free[RegionsPerPage]; public: - constexpr explicit KThreadLocalPage(KProcessAddress addr) : virt_addr(addr), owner(nullptr), is_region_free() { + constexpr explicit KThreadLocalPage(KProcessAddress addr) : m_virt_addr(addr), m_owner(nullptr), m_is_region_free() { for (size_t i = 0; i < RegionsPerPage; i++) { - this->is_region_free[i] = true; + m_is_region_free[i] = true; } } constexpr explicit KThreadLocalPage() : KThreadLocalPage(Null<KProcessAddress>) { /* ... */ } - constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return this->virt_addr; } + constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return m_virt_addr; } static constexpr ALWAYS_INLINE int Compare(const KThreadLocalPage &lhs, const KThreadLocalPage &rhs) { const KProcessAddress lval = lhs.GetAddress(); @@ -80,7 +80,7 @@ namespace ams::kern { bool IsAllUsed() const { for (size_t i = 0; i < RegionsPerPage; i++) { - if (this->is_region_free[i]) { + if (m_is_region_free[i]) { return false; } } @@ -89,7 +89,7 @@ namespace ams::kern { bool IsAllFree() const { for (size_t i = 0; i < RegionsPerPage; i++) { - if (!this->is_region_free[i]) { + if (!m_is_region_free[i]) { return false; } } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_queue.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_queue.hpp index fc18b564b..e7e615128 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread_queue.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_queue.hpp @@ -21,14 +21,14 @@ namespace ams::kern { class KThreadQueue { private: - KThread::WaiterList wait_list; + KThread::WaiterList m_wait_list; public: - constexpr ALWAYS_INLINE KThreadQueue() : wait_list() { /* ... */ } + constexpr ALWAYS_INLINE KThreadQueue() : m_wait_list() { /* ...
*/ } - bool IsEmpty() const { return this->wait_list.empty(); } + bool IsEmpty() const { return m_wait_list.empty(); } - KThread::WaiterList::iterator begin() { return this->wait_list.begin(); } - KThread::WaiterList::iterator end() { return this->wait_list.end(); } + KThread::WaiterList::iterator begin() { return m_wait_list.begin(); } + KThread::WaiterList::iterator end() { return m_wait_list.end(); } bool SleepThread(KThread *t) { KScopedSchedulerLock sl; @@ -43,7 +43,7 @@ namespace ams::kern { t->SetState(KThread::ThreadState_Waiting); /* Add the thread to the queue. */ - this->wait_list.push_back(*t); + m_wait_list.push_back(*t); return true; } @@ -52,7 +52,7 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Remove the thread from the queue. */ - this->wait_list.erase(this->wait_list.iterator_to(*t)); + m_wait_list.erase(m_wait_list.iterator_to(*t)); /* Mark the thread as no longer sleeping. */ t->SetState(KThread::ThreadState_Runnable); @@ -62,13 +62,13 @@ namespace ams::kern { KThread *WakeupFrontThread() { KScopedSchedulerLock sl; - if (this->wait_list.empty()) { + if (m_wait_list.empty()) { return nullptr; } else { /* Remove the thread from the queue. */ - auto it = this->wait_list.begin(); + auto it = m_wait_list.begin(); KThread *thread = std::addressof(*it); - this->wait_list.erase(it); + m_wait_list.erase(it); MESOSPHERE_ASSERT(thread->GetState() == KThread::ThreadState_Waiting); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp index dba2e1c16..6efafe7f7 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp @@ -20,7 +20,7 @@ namespace ams::kern { class KTimerTask : public util::IntrusiveRedBlackTreeBaseNode<KTimerTask> { private: - s64 time; + s64 m_time; public: static constexpr ALWAYS_INLINE int Compare(const KTimerTask &lhs, const KTimerTask &rhs) { if (lhs.GetTime() < rhs.GetTime()) { @@ -30,14 +30,14 @@ namespace ams::kern { } } public: - constexpr ALWAYS_INLINE KTimerTask() : time(0) { /* ... */ } + constexpr ALWAYS_INLINE KTimerTask() : m_time(0) { /* ...
*/ } constexpr ALWAYS_INLINE void SetTime(s64 t) { - this->time = t; + m_time = t; } constexpr ALWAYS_INLINE s64 GetTime() const { - return this->time; + return m_time; } virtual void OnTimer() = 0; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp index a0000202a..831b35824 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp @@ -23,15 +23,15 @@ namespace ams::kern { class KTransferMemory final : public KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList> { MESOSPHERE_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); private: - TYPED_STORAGE(KPageGroup) page_group; - KProcess *owner; - KProcessAddress address; - KLightLock lock; - ams::svc::MemoryPermission owner_perm; - bool is_initialized; - bool is_mapped; + TYPED_STORAGE(KPageGroup) m_page_group; + KProcess *m_owner; + KProcessAddress m_address; + KLightLock m_lock; + ams::svc::MemoryPermission m_owner_perm; + bool m_is_initialized; + bool m_is_mapped; public: - explicit KTransferMemory() : owner(nullptr), address(Null<KProcessAddress>), owner_perm(ams::svc::MemoryPermission_None), is_initialized(false), is_mapped(false) { + explicit KTransferMemory() : m_owner(nullptr), m_address(Null<KProcessAddress>), m_owner_perm(ams::svc::MemoryPermission_None), m_is_initialized(false), m_is_mapped(false) { /* ... */ } @@ -40,16 +40,16 @@ namespace ams::kern { Result Initialize(KProcessAddress addr, size_t size, ams::svc::MemoryPermission own_perm); virtual void Finalize() override; - virtual bool IsInitialized() const override { return this->is_initialized; } - virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(this->owner); } + virtual bool IsInitialized() const override { return m_is_initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast<uintptr_t>(m_owner); } static void PostDestroy(uintptr_t arg); Result Map(KProcessAddress address, size_t size, ams::svc::MemoryPermission map_perm); Result Unmap(KProcessAddress address, size_t size); - KProcess *GetOwner() const { return this->owner; } - KProcessAddress GetSourceAddress() { return this->address; } - size_t GetSize() const { return this->is_initialized ? GetReference(this->page_group).GetNumPages() * PageSize : 0; } + KProcess *GetOwner() const { return m_owner; } + KProcessAddress GetSourceAddress() { return m_address; } + size_t GetSize() const { return m_is_initialized ? GetReference(m_page_group).GetNumPages() * PageSize : 0; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp index 7144bf5b4..3cdee70f3 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp @@ -23,13 +23,13 @@ namespace ams::kern { template<bool Virtual, typename T> class KTypedAddress { private: - uintptr_t address; + uintptr_t m_address; public: /* Constructors. */ - constexpr ALWAYS_INLINE KTypedAddress() : address(0) { /* ... */ } - constexpr ALWAYS_INLINE KTypedAddress(uintptr_t a) : address(a) { /* ... */ } + constexpr ALWAYS_INLINE KTypedAddress() : m_address(0) { /* ... */ } + constexpr ALWAYS_INLINE KTypedAddress(uintptr_t a) : m_address(a) { /* ... */ } template<typename U> - constexpr ALWAYS_INLINE explicit KTypedAddress(U *ptr) : address(reinterpret_cast<uintptr_t>(ptr)) { /* ...
*/ } + constexpr ALWAYS_INLINE explicit KTypedAddress(U *ptr) : m_address(reinterpret_cast<uintptr_t>(ptr)) { /* ... */ } /* Copy constructor. */ constexpr ALWAYS_INLINE KTypedAddress(const KTypedAddress &rhs) = default; @@ -41,92 +41,92 @@ namespace ams::kern { template<typename I> constexpr ALWAYS_INLINE KTypedAddress operator+(I rhs) const { static_assert(std::is_integral<I>::value); - return this->address + rhs; + return m_address + rhs; } template<typename I> constexpr ALWAYS_INLINE KTypedAddress operator-(I rhs) const { static_assert(std::is_integral<I>::value); - return this->address - rhs; + return m_address - rhs; } constexpr ALWAYS_INLINE ptrdiff_t operator-(KTypedAddress rhs) const { - return this->address - rhs.address; + return m_address - rhs.m_address; } template<typename I> constexpr ALWAYS_INLINE KTypedAddress operator+=(I rhs) { static_assert(std::is_integral<I>::value); - this->address += rhs; + m_address += rhs; return *this; } template<typename I> constexpr ALWAYS_INLINE KTypedAddress operator-=(I rhs) { static_assert(std::is_integral<I>::value); - this->address -= rhs; + m_address -= rhs; return *this; } /* Logical operators. */ constexpr ALWAYS_INLINE uintptr_t operator&(uintptr_t mask) const { - return this->address & mask; + return m_address & mask; } constexpr ALWAYS_INLINE uintptr_t operator|(uintptr_t mask) const { - return this->address | mask; + return m_address | mask; } constexpr ALWAYS_INLINE uintptr_t operator<<(int shift) const { - return this->address << shift; + return m_address << shift; } constexpr ALWAYS_INLINE uintptr_t operator>>(int shift) const { - return this->address >> shift; + return m_address >> shift; } template<typename U> - constexpr ALWAYS_INLINE size_t operator/(U size) const { return this->address / size; } + constexpr ALWAYS_INLINE size_t operator/(U size) const { return m_address / size; } - /* constexpr ALWAYS_INLINE uintptr_t operator%(U align) const { return this->address % align; } */ + /* constexpr ALWAYS_INLINE uintptr_t operator%(U align) const { return m_address % align; } */ /* Comparison operators. */ constexpr ALWAYS_INLINE bool operator==(KTypedAddress rhs) const { - return this->address == rhs.address; + return m_address == rhs.m_address; } constexpr ALWAYS_INLINE bool operator!=(KTypedAddress rhs) const { - return this->address != rhs.address; + return m_address != rhs.m_address; } constexpr ALWAYS_INLINE bool operator<(KTypedAddress rhs) const { - return this->address < rhs.address; + return m_address < rhs.m_address; } constexpr ALWAYS_INLINE bool operator<=(KTypedAddress rhs) const { - return this->address <= rhs.address; + return m_address <= rhs.m_address; } constexpr ALWAYS_INLINE bool operator>(KTypedAddress rhs) const { - return this->address > rhs.address; + return m_address > rhs.m_address; } constexpr ALWAYS_INLINE bool operator>=(KTypedAddress rhs) const { - return this->address >= rhs.address; + return m_address >= rhs.m_address; } /* For convenience, also define comparison operators versus uintptr_t. */ constexpr ALWAYS_INLINE bool operator==(uintptr_t rhs) const { - return this->address == rhs; + return m_address == rhs; } constexpr ALWAYS_INLINE bool operator!=(uintptr_t rhs) const { - return this->address != rhs; + return m_address != rhs; } /* Allow getting the address explicitly, for use in accessors.
*/ constexpr ALWAYS_INLINE uintptr_t GetValue() const { - return this->address; + return m_address; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_unsafe_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_unsafe_memory.hpp index 713d240fd..9c7897708 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_unsafe_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_unsafe_memory.hpp @@ -21,57 +21,57 @@ namespace ams::kern { class KUnsafeMemory { private: - mutable KLightLock lock; - size_t limit_size; - size_t current_size; + mutable KLightLock m_lock; + size_t m_limit_size; + size_t m_current_size; public: - constexpr KUnsafeMemory() : lock(), limit_size(), current_size() { /* ... */ } + constexpr KUnsafeMemory() : m_lock(), m_limit_size(), m_current_size() { /* ... */ } bool TryReserve(size_t size) { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Test for overflow. */ - if (this->current_size > this->current_size + size) { + if (m_current_size > m_current_size + size) { return false; } /* Test for limit allowance. */ - if (this->current_size + size > this->limit_size) { + if (m_current_size + size > m_limit_size) { return false; } /* Reserve the size. */ - this->current_size += size; + m_current_size += size; return true; } void Release(size_t size) { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - MESOSPHERE_ABORT_UNLESS(this->current_size >= size); - this->current_size -= size; + MESOSPHERE_ABORT_UNLESS(m_current_size >= size); + m_current_size -= size; } size_t GetLimitSize() const { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); - return this->limit_size; + KScopedLightLock lk(m_lock); + return m_limit_size; } size_t GetCurrentSize() const { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); - return this->current_size; + KScopedLightLock lk(m_lock); + return m_current_size; } Result SetLimitSize(size_t size) { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - R_UNLESS(size >= this->current_size, svc::ResultLimitReached()); - this->limit_size = size; + R_UNLESS(size >= m_current_size, svc::ResultLimitReached()); + m_limit_size = size; return ResultSuccess(); } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp index 16041e411..5424bd772 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp @@ -22,10 +22,10 @@ namespace ams::kern { class KWaitObject : public KTimerTask { private: - KThread::WaiterList wait_list; - bool timer_used; + KThread::WaiterList m_wait_list; + bool m_timer_used; public: - constexpr KWaitObject() : wait_list(), timer_used() { /* ... */ } + constexpr KWaitObject() : m_wait_list(), m_timer_used() { /* ... 
*/ } virtual void OnTimer() override; Result Synchronize(s64 timeout); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp index 7620adb6a..4ec6625b7 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp @@ -20,12 +20,12 @@ namespace ams::kern { class KWorkerTask { private: - KWorkerTask *next_task; + KWorkerTask *m_next_task; public: - constexpr ALWAYS_INLINE KWorkerTask() : next_task(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE KWorkerTask() : m_next_task(nullptr) { /* ... */ } - constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return this->next_task; } - constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { this->next_task = task; } + constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return m_next_task; } + constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { m_next_task = task; } virtual void DoWorkerTask() = 0; }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp index 6e26d6af9..89d95493c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp @@ -30,11 +30,11 @@ namespace ams::kern { WorkerType_Count, }; private: - KWorkerTask *head_task; - KWorkerTask *tail_task; - KThread *thread; - WorkerType type; - bool active; + KWorkerTask *m_head_task; + KWorkerTask *m_tail_task; + KThread *m_thread; + WorkerType m_type; + bool m_active; private: static void ThreadFunction(uintptr_t arg); void ThreadFunctionImpl(); @@ -42,7 +42,7 @@ namespace ams::kern { KWorkerTask *GetTask(); void AddTask(KWorkerTask *task); public: - constexpr KWorkerTaskManager() : head_task(), tail_task(), thread(), type(WorkerType_Count), active() { /* ... */ } + constexpr KWorkerTaskManager() : m_head_task(), m_tail_task(), m_thread(), m_type(WorkerType_Count), m_active() { /* ... */ } NOINLINE void Initialize(WorkerType wt, s32 priority); static void AddTask(WorkerType type, KWorkerTask *task); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_writable_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_writable_event.hpp index f3ee813d6..ffdbbb57e 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_writable_event.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_writable_event.hpp @@ -25,9 +25,9 @@ namespace ams::kern { class KWritableEvent final : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> { MESOSPHERE_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject); private: - KEvent *parent; + KEvent *m_parent; public: - constexpr explicit KWritableEvent() : parent(nullptr) { /* ... */ } + constexpr explicit KWritableEvent() : m_parent(nullptr) { /* ... */ } virtual ~KWritableEvent() { /* ...
*/ } virtual void Destroy() override; @@ -38,7 +38,7 @@ namespace ams::kern { Result Signal(); Result Clear(); - constexpr KEvent *GetParent() const { return this->parent; } + constexpr KEvent *GetParent() const { return m_parent; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp index 178ce8c36..10169d2e1 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp @@ -39,20 +39,20 @@ namespace ams::kern { NON_COPYABLE(KScopedInterruptDisable); NON_MOVEABLE(KScopedInterruptDisable); private: - u32 prev_intr_state; + u32 m_prev_intr_state; public: - ALWAYS_INLINE KScopedInterruptDisable() : prev_intr_state(KInterruptManager::DisableInterrupts()) { /* ... */ } - ALWAYS_INLINE ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(prev_intr_state); } + ALWAYS_INLINE KScopedInterruptDisable() : m_prev_intr_state(KInterruptManager::DisableInterrupts()) { /* ... */ } + ALWAYS_INLINE ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(m_prev_intr_state); } }; class KScopedInterruptEnable { NON_COPYABLE(KScopedInterruptEnable); NON_MOVEABLE(KScopedInterruptEnable); private: - u32 prev_intr_state; + u32 m_prev_intr_state; public: - ALWAYS_INLINE KScopedInterruptEnable() : prev_intr_state(KInterruptManager::EnableInterrupts()) { /* ... */ } - ALWAYS_INLINE ~KScopedInterruptEnable() { KInterruptManager::RestoreInterrupts(prev_intr_state); } + ALWAYS_INLINE KScopedInterruptEnable() : m_prev_intr_state(KInterruptManager::EnableInterrupts()) { /* ... */ } + ALWAYS_INLINE ~KScopedInterruptEnable() { KInterruptManager::RestoreInterrupts(m_prev_intr_state); } }; } diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp index 5d707d52e..195c13dba 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp @@ -108,28 +108,28 @@ namespace ams::kern::svc { using CT = typename std::remove_pointer<_T>::type; using T = typename std::remove_const<CT>::type; private: - CT *ptr; + CT *m_ptr; private: Result CopyToImpl(void *p, size_t size) const { - return Traits::CopyFromUserspace(p, this->ptr, size); + return Traits::CopyFromUserspace(p, m_ptr, size); } Result CopyFromImpl(const void *p, size_t size) const { - return Traits::CopyToUserspace(this->ptr, p, size); + return Traits::CopyToUserspace(m_ptr, p, size); } protected: Result CopyTo(T *p) const { return this->CopyToImpl(p, sizeof(*p)); } Result CopyFrom(const T *p) const { return this->CopyFromImpl(p, sizeof(*p)); } - Result CopyArrayElementTo(T *p, size_t index) const { return Traits::CopyFromUserspace(p, this->ptr + index, sizeof(*p)); } - Result CopyArrayElementFrom(const T *p, size_t index) const { return Traits::CopyToUserspace(this->ptr + index, p, sizeof(*p)); } + Result CopyArrayElementTo(T *p, size_t index) const { return Traits::CopyFromUserspace(p, m_ptr + index, sizeof(*p)); } + Result CopyArrayElementFrom(const T *p, size_t index) const { return Traits::CopyToUserspace(m_ptr + index, p, sizeof(*p)); } Result CopyArrayTo(T *arr, size_t count) const { return this->CopyToImpl(arr, sizeof(*arr) * count); } Result CopyArrayFrom(const T *arr, size_t count) const { return
this->CopyFromImpl(arr, sizeof(*arr) * count); } - constexpr bool IsNull() const { return this->ptr == nullptr; } + constexpr bool IsNull() const { return m_ptr == nullptr; } - constexpr CT *GetUnsafePointer() const { return this->ptr; } + constexpr CT *GetUnsafePointer() const { return m_ptr; } }; template<> diff --git a/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc b/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc index 07d2e22b1..4f35bb25c 100644 --- a/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc +++ b/libraries/libmesosphere/source/arch/arm/kern_generic_interrupt_controller.inc @@ -18,47 +18,47 @@ namespace ams::kern::arch::arm { void KInterruptController::SetupInterruptLines(s32 core_id) const { - const size_t ITLines = (core_id == 0) ? 32 * ((this->gicd->typer & 0x1F) + 1) : NumLocalInterrupts; + const size_t ITLines = (core_id == 0) ? 32 * ((m_gicd->typer & 0x1F) + 1) : NumLocalInterrupts; for (size_t i = 0; i < ITLines / 32; i++) { - this->gicd->icenabler[i] = 0xFFFFFFFF; - this->gicd->icpendr[i] = 0xFFFFFFFF; - this->gicd->icactiver[i] = 0xFFFFFFFF; - this->gicd->igroupr[i] = 0; + m_gicd->icenabler[i] = 0xFFFFFFFF; + m_gicd->icpendr[i] = 0xFFFFFFFF; + m_gicd->icactiver[i] = 0xFFFFFFFF; + m_gicd->igroupr[i] = 0; } for (size_t i = 0; i < ITLines; i++) { - this->gicd->ipriorityr.bytes[i] = 0xFF; - this->gicd->itargetsr.bytes[i] = 0x00; + m_gicd->ipriorityr.bytes[i] = 0xFF; + m_gicd->itargetsr.bytes[i] = 0x00; } for (size_t i = 0; i < ITLines / 16; i++) { - this->gicd->icfgr[i] = 0x00000000; + m_gicd->icfgr[i] = 0x00000000; } } void KInterruptController::Initialize(s32 core_id) { /* Setup pointers to ARM mmio. */ - this->gicd = GetPointer<GicDistributor>(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptDistributor)); - this->gicc = GetPointer<GicCpuInterface>(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptCpuInterface)); + m_gicd = GetPointer<GicDistributor>(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptDistributor)); + m_gicc = GetPointer<GicCpuInterface>(KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_InterruptCpuInterface)); /* Clear CTLRs. */ - this->gicc->ctlr = 0; + m_gicc->ctlr = 0; if (core_id == 0) { - this->gicd->ctlr = 0; + m_gicd->ctlr = 0; } - this->gicc->pmr = 0; - this->gicc->bpr = 7; + m_gicc->pmr = 0; + m_gicc->bpr = 7; /* Setup all interrupt lines. */ SetupInterruptLines(core_id); /* Set CTLRs. */ if (core_id == 0) { - this->gicd->ctlr = 1; + m_gicd->ctlr = 1; } - this->gicc->ctlr = 1; + m_gicc->ctlr = 1; /* Set the mask for this core. */ SetGicMask(core_id); @@ -70,9 +70,9 @@ namespace ams::kern::arch::arm { void KInterruptController::Finalize(s32 core_id) { /* Clear CTLRs. */ if (core_id == 0) { - this->gicd->ctlr = 0; + m_gicd->ctlr = 0; } - this->gicc->ctlr = 0; + m_gicc->ctlr = 0; /* Set the priority level. */ SetPriorityLevel(PriorityLevel_High); @@ -85,27 +85,27 @@ namespace ams::kern::arch::arm { /* Save isenabler. */ for (size_t i = 0; i < util::size(state->isenabler); ++i) { constexpr size_t Offset = 0; - state->isenabler[i] = this->gicd->isenabler[i + Offset]; - this->gicd->isenabler[i + Offset] = 0xFFFFFFFF; + state->isenabler[i] = m_gicd->isenabler[i + Offset]; + m_gicd->isenabler[i + Offset] = 0xFFFFFFFF; } /* Save ipriorityr.
*/ for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { constexpr size_t Offset = 0; - state->ipriorityr[i] = this->gicd->ipriorityr.words[i + Offset]; - this->gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF; + state->ipriorityr[i] = m_gicd->ipriorityr.words[i + Offset]; + m_gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF; } /* Save itargetsr. */ for (size_t i = 0; i < util::size(state->itargetsr); ++i) { constexpr size_t Offset = 0; - state->itargetsr[i] = this->gicd->itargetsr.words[i + Offset]; + state->itargetsr[i] = m_gicd->itargetsr.words[i + Offset]; } /* Save icfgr. */ for (size_t i = 0; i < util::size(state->icfgr); ++i) { constexpr size_t Offset = 0; - state->icfgr[i] = this->gicd->icfgr[i + Offset]; + state->icfgr[i] = m_gicd->icfgr[i + Offset]; } } @@ -113,27 +113,27 @@ namespace ams::kern::arch::arm { /* Save isenabler. */ for (size_t i = 0; i < util::size(state->isenabler); ++i) { constexpr size_t Offset = util::size(LocalState{}.isenabler); - state->isenabler[i] = this->gicd->isenabler[i + Offset]; - this->gicd->isenabler[i + Offset] = 0xFFFFFFFF; + state->isenabler[i] = m_gicd->isenabler[i + Offset]; + m_gicd->isenabler[i + Offset] = 0xFFFFFFFF; } /* Save ipriorityr. */ for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { constexpr size_t Offset = util::size(LocalState{}.ipriorityr); - state->ipriorityr[i] = this->gicd->ipriorityr.words[i + Offset]; - this->gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF; + state->ipriorityr[i] = m_gicd->ipriorityr.words[i + Offset]; + m_gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF; } /* Save itargetsr. */ for (size_t i = 0; i < util::size(state->itargetsr); ++i) { constexpr size_t Offset = util::size(LocalState{}.itargetsr); - state->itargetsr[i] = this->gicd->itargetsr.words[i + Offset]; + state->itargetsr[i] = m_gicd->itargetsr.words[i + Offset]; } /* Save icfgr. */ for (size_t i = 0; i < util::size(state->icfgr); ++i) { constexpr size_t Offset = util::size(LocalState{}.icfgr); - state->icfgr[i] = this->gicd->icfgr[i + Offset]; + state->icfgr[i] = m_gicd->icfgr[i + Offset]; } } @@ -141,26 +141,26 @@ namespace ams::kern::arch::arm { /* Restore ipriorityr. */ for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { constexpr size_t Offset = 0; - this->gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i]; + m_gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i]; } /* Restore itargetsr. */ for (size_t i = 0; i < util::size(state->itargetsr); ++i) { constexpr size_t Offset = 0; - this->gicd->itargetsr.words[i + Offset] = state->itargetsr[i]; + m_gicd->itargetsr.words[i + Offset] = state->itargetsr[i]; } /* Restore icfgr. */ for (size_t i = 0; i < util::size(state->icfgr); ++i) { constexpr size_t Offset = 0; - this->gicd->icfgr[i + Offset] = state->icfgr[i]; + m_gicd->icfgr[i + Offset] = state->icfgr[i]; } /* Restore isenabler. */ for (size_t i = 0; i < util::size(state->isenabler); ++i) { constexpr size_t Offset = 0; - this->gicd->icenabler[i + Offset] = 0xFFFFFFFF; - this->gicd->isenabler[i + Offset] = state->isenabler[i]; + m_gicd->icenabler[i + Offset] = 0xFFFFFFFF; + m_gicd->isenabler[i + Offset] = state->isenabler[i]; } } @@ -168,26 +168,26 @@ namespace ams::kern::arch::arm { /* Restore ipriorityr. */ for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { constexpr size_t Offset = util::size(LocalState{}.ipriorityr); - this->gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i]; + m_gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i]; } /* Restore itargetsr. 
*/ for (size_t i = 0; i < util::size(state->itargetsr); ++i) { constexpr size_t Offset = util::size(LocalState{}.itargetsr); - this->gicd->itargetsr.words[i + Offset] = state->itargetsr[i]; + m_gicd->itargetsr.words[i + Offset] = state->itargetsr[i]; } /* Restore icfgr. */ for (size_t i = 0; i < util::size(state->icfgr); ++i) { constexpr size_t Offset = util::size(LocalState{}.icfgr); - this->gicd->icfgr[i + Offset] = state->icfgr[i]; + m_gicd->icfgr[i + Offset] = state->icfgr[i]; } /* Restore isenabler. */ for (size_t i = 0; i < util::size(state->isenabler); ++i) { constexpr size_t Offset = util::size(LocalState{}.isenabler); - this->gicd->icenabler[i + Offset] = 0xFFFFFFFF; - this->gicd->isenabler[i + Offset] = state->isenabler[i]; + m_gicd->icenabler[i + Offset] = 0xFFFFFFFF; + m_gicd->isenabler[i + Offset] = state->isenabler[i]; } } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp index dd39f3b84..8b31ccb44 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp @@ -46,38 +46,38 @@ namespace ams::kern::arch::arm64::cpu { private: static inline KLightLock s_lock; private: - u64 counter; - s32 which; - bool done; + u64 m_counter; + s32 m_which; + bool m_done; public: - constexpr KPerformanceCounterInterruptHandler() : KInterruptHandler(), counter(), which(), done() { /* ... */ } + constexpr KPerformanceCounterInterruptHandler() : KInterruptHandler(), m_counter(), m_which(), m_done() { /* ... */ } static KLightLock &GetLock() { return s_lock; } void Setup(s32 w) { - this->done = false; - this->which = w; + m_done = false; + m_which = w; } void Wait() { - while (!this->done) { + while (!m_done) { cpu::Yield(); } } - u64 GetCounter() const { return this->counter; } + u64 GetCounter() const { return m_counter; } /* Nintendo misuses this per their own API, but it's functional. */ virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override { MESOSPHERE_UNUSED(interrupt_id); - if (this->which < 0) { - this->counter = cpu::GetCycleCounter(); + if (m_which < 0) { + m_counter = cpu::GetCycleCounter(); } else { - this->counter = cpu::GetPerformanceCounter(this->which); + m_counter = cpu::GetPerformanceCounter(m_which); } DataMemoryBarrier(); - this->done = true; + m_done = true; return nullptr; } }; @@ -93,11 +93,11 @@ namespace ams::kern::arch::arm64::cpu { FlushDataCache, }; private: - KLightLock lock; - KLightLock cv_lock; - KLightConditionVariable cv; - std::atomic<u64> target_cores; - volatile Operation operation; + KLightLock m_lock; + KLightLock m_cv_lock; + KLightConditionVariable m_cv; + std::atomic<u64> m_target_cores; + volatile Operation m_operation; private: static void ThreadFunction(uintptr_t _this) { reinterpret_cast<KCacheHelperInterruptHandler *>(_this)->ThreadFunctionImpl(); } @@ -108,9 +108,9 @@ namespace ams::kern::arch::arm64::cpu { while (true) { /* Wait for a request to come in. */ { - KScopedLightLock lk(this->cv_lock); - while ((this->target_cores & (1ul << core_id)) == 0) { - this->cv.Wait(std::addressof(this->cv_lock)); + KScopedLightLock lk(m_cv_lock); + while ((m_target_cores & (1ul << core_id)) == 0) { + m_cv.Wait(std::addressof(m_cv_lock)); } } @@ -119,9 +119,9 @@ namespace ams::kern::arch::arm64::cpu { /* Broadcast, if there's nothing pending.
*/ { - KScopedLightLock lk(this->cv_lock); - if (this->target_cores == 0) { - this->cv.Broadcast(); + KScopedLightLock lk(m_cv_lock); + if (m_target_cores == 0) { + m_cv.Broadcast(); } } } @@ -129,7 +129,7 @@ namespace ams::kern::arch::arm64::cpu { void ProcessOperation(); public: - constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), lock(), cv_lock(), cv(), target_cores(), operation(Operation::Idle) { /* ... */ } + constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), m_lock(), m_cv_lock(), m_cv(), m_target_cores(), m_operation(Operation::Idle) { /* ... */ } void Initialize(s32 core_id) { /* Reserve a thread from the system limit. */ @@ -154,7 +154,7 @@ namespace ams::kern::arch::arm64::cpu { } void RequestOperation(Operation op) { - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Create core masks for us to use. */ constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul; @@ -162,48 +162,48 @@ namespace ams::kern::arch::arm64::cpu { if ((op == Operation::InstructionMemoryBarrier) || (Kernel::GetState() == Kernel::State::Initializing)) { /* Check that there's no on-going operation. */ - MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle); - MESOSPHERE_ABORT_UNLESS(this->target_cores == 0); + MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle); + MESOSPHERE_ABORT_UNLESS(m_target_cores == 0); /* Set operation. */ - this->operation = op; + m_operation = op; /* For certain operations, we want to send an interrupt. */ - this->target_cores = other_cores_mask; + m_target_cores = other_cores_mask; - const u64 target_mask = this->target_cores; + const u64 target_mask = m_target_cores; DataSynchronizationBarrier(); Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask); this->ProcessOperation(); - while (this->target_cores != 0) { + while (m_target_cores != 0) { cpu::Yield(); } /* Go idle again. */ - this->operation = Operation::Idle; + m_operation = Operation::Idle; } else { /* Lock condvar so that we can send and wait for acknowledgement of request. */ - KScopedLightLock cv_lk(this->cv_lock); + KScopedLightLock cv_lk(m_cv_lock); /* Check that there's no on-going operation. */ - MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle); - MESOSPHERE_ABORT_UNLESS(this->target_cores == 0); + MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle); + MESOSPHERE_ABORT_UNLESS(m_target_cores == 0); /* Set operation. */ - this->operation = op; + m_operation = op; /* Request all cores. */ - this->target_cores = AllCoresMask; + m_target_cores = AllCoresMask; /* Use the condvar. */ - this->cv.Broadcast(); - while (this->target_cores != 0) { - this->cv.Wait(std::addressof(this->cv_lock)); + m_cv.Broadcast(); + while (m_target_cores != 0) { + m_cv.Wait(std::addressof(m_cv_lock)); } /* Go idle again. 
*/ - this->operation = Operation::Idle; + m_operation = Operation::Idle; } } }; @@ -283,7 +283,7 @@ namespace ams::kern::arch::arm64::cpu { } void KCacheHelperInterruptHandler::ProcessOperation() { - switch (this->operation) { + switch (m_operation) { case Operation::Idle: break; case Operation::InstructionMemoryBarrier: @@ -299,7 +299,7 @@ namespace ams::kern::arch::arm64::cpu { break; } - this->target_cores &= ~(1ul << GetCurrentCoreId()); + m_target_cores &= ~(1ul << GetCurrentCoreId()); } ALWAYS_INLINE void SetEventLocally() { diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp index fa42d6768..288771ca0 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp @@ -22,7 +22,7 @@ namespace ams::kern::arch::arm64 { InitializeGlobalTimer(); /* Set maximum time. */ - this->maximum_time = static_cast<s64>(std::min<u64>(std::numeric_limits<s64>::max(), cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor().GetCompareValue())); + m_maximum_time = static_cast<s64>(std::min<u64>(std::numeric_limits<s64>::max(), cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor().GetCompareValue())); /* Bind the interrupt task for this core. */ Kernel::GetInterruptManager().BindHandler(this, KInterruptName_NonSecurePhysicalTimer, GetCurrentCoreId(), KInterruptController::PriorityLevel_Timer, true, true); @@ -41,7 +41,7 @@ namespace ams::kern::arch::arm64 { /* Disable the timer interrupt while we handle this. */ DisableInterrupt(); - if (const s64 next_time = this->DoInterruptTaskImpl(GetTick()); 0 < next_time && next_time <= this->maximum_time) { + if (const s64 next_time = this->DoInterruptTaskImpl(GetTick()); 0 < next_time && next_time <= m_maximum_time) { /* We have a next time, so we should set the time to interrupt and turn the interrupt on. */ SetCompareValue(next_time); EnableInterrupt(); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp index a81aba58e..efb5e87a8 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp @@ -18,11 +18,11 @@ namespace ams::kern::arch::arm64 { void KInterruptManager::Initialize(s32 core_id) { - this->interrupt_controller.Initialize(core_id); + m_interrupt_controller.Initialize(core_id); } void KInterruptManager::Finalize(s32 core_id) { - this->interrupt_controller.Finalize(core_id); + m_interrupt_controller.Finalize(core_id); } void KInterruptManager::Save(s32 core_id) { @@ -34,18 +34,18 @@ namespace ams::kern::arch::arm64 { /* If on core 0, save the global interrupts. */ if (core_id == 0) { - MESOSPHERE_ABORT_UNLESS(!this->global_state_saved); - this->interrupt_controller.SaveGlobal(std::addressof(this->global_state)); - this->global_state_saved = true; + MESOSPHERE_ABORT_UNLESS(!m_global_state_saved); + m_interrupt_controller.SaveGlobal(std::addressof(m_global_state)); + m_global_state_saved = true; } /* Ensure all cores get to this point before continuing. */ cpu::SynchronizeAllCores(); /* Save all local interrupts.
*/ - MESOSPHERE_ABORT_UNLESS(!this->local_state_saved[core_id]); - this->interrupt_controller.SaveCoreLocal(std::addressof(this->local_states[core_id])); - this->local_state_saved[core_id] = true; + MESOSPHERE_ABORT_UNLESS(!m_local_state_saved[core_id]); + m_interrupt_controller.SaveCoreLocal(std::addressof(m_local_states[core_id])); + m_local_state_saved[core_id] = true; /* Ensure all cores get to this point before continuing. */ cpu::SynchronizeAllCores(); @@ -88,18 +88,18 @@ namespace ams::kern::arch::arm64 { cpu::SynchronizeAllCores(); /* Restore all local interrupts. */ - MESOSPHERE_ASSERT(this->local_state_saved[core_id]); - this->interrupt_controller.RestoreCoreLocal(std::addressof(this->local_states[core_id])); - this->local_state_saved[core_id] = false; + MESOSPHERE_ASSERT(m_local_state_saved[core_id]); + m_interrupt_controller.RestoreCoreLocal(std::addressof(m_local_states[core_id])); + m_local_state_saved[core_id] = false; /* Ensure all cores get to this point before continuing. */ cpu::SynchronizeAllCores(); /* If on core 0, restore the global interrupts. */ if (core_id == 0) { - MESOSPHERE_ASSERT(this->global_state_saved); - this->interrupt_controller.RestoreGlobal(std::addressof(this->global_state)); - this->global_state_saved = false; + MESOSPHERE_ASSERT(m_global_state_saved); + m_interrupt_controller.RestoreGlobal(std::addressof(m_global_state)); + m_global_state_saved = false; } /* Ensure all cores get to this point before continuing. */ @@ -108,7 +108,7 @@ namespace ams::kern::arch::arm64 { bool KInterruptManager::OnHandleInterrupt() { /* Get the interrupt id. */ - const u32 raw_irq = this->interrupt_controller.GetIrq(); + const u32 raw_irq = m_interrupt_controller.GetIrq(); const s32 irq = KInterruptController::ConvertRawIrq(raw_irq); /* Trace the interrupt. */ @@ -126,7 +126,7 @@ namespace ams::kern::arch::arm64 { if (entry.handler != nullptr) { /* Set manual clear needed if relevant. */ if (entry.manually_cleared) { - this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); + m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); entry.needs_clear = true; } @@ -143,7 +143,7 @@ namespace ams::kern::arch::arm64 { if (entry.handler != nullptr) { /* Set manual clear needed if relevant. */ if (entry.manually_cleared) { - this->interrupt_controller.Disable(irq); + m_interrupt_controller.Disable(irq); entry.needs_clear = true; } @@ -157,7 +157,7 @@ namespace ams::kern::arch::arm64 { } /* Acknowledge the interrupt. */ - this->interrupt_controller.EndOfInterrupt(raw_irq); + m_interrupt_controller.EndOfInterrupt(raw_irq); /* If we found no task, then we don't need to reschedule. */ if (task == nullptr) { @@ -273,16 +273,16 @@ namespace ams::kern::arch::arm64 { /* Configure the interrupt as level or edge. */ if (level) { - this->interrupt_controller.SetLevel(irq); + m_interrupt_controller.SetLevel(irq); } else { - this->interrupt_controller.SetEdge(irq); + m_interrupt_controller.SetEdge(irq); } /* Configure the interrupt. 
*/ - this->interrupt_controller.Clear(irq); - this->interrupt_controller.SetTarget(irq, core_id); - this->interrupt_controller.SetPriorityLevel(irq, priority); - this->interrupt_controller.Enable(irq); + m_interrupt_controller.Clear(irq); + m_interrupt_controller.SetTarget(irq, core_id); + m_interrupt_controller.SetPriorityLevel(irq, priority); + m_interrupt_controller.Enable(irq); return ResultSuccess(); } @@ -303,19 +303,19 @@ namespace ams::kern::arch::arm64 { entry.priority = static_cast<u8>(priority); /* Configure the interrupt. */ - this->interrupt_controller.Clear(irq); - this->interrupt_controller.SetPriorityLevel(irq, priority); - this->interrupt_controller.Enable(irq); + m_interrupt_controller.Clear(irq); + m_interrupt_controller.SetPriorityLevel(irq, priority); + m_interrupt_controller.Enable(irq); return ResultSuccess(); } Result KInterruptManager::UnbindGlobal(s32 irq) { for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) { - this->interrupt_controller.ClearTarget(irq, static_cast<s32>(core_id)); + m_interrupt_controller.ClearTarget(irq, static_cast<s32>(core_id)); } - this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); - this->interrupt_controller.Disable(irq); + m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); + m_interrupt_controller.Disable(irq); GetGlobalInterruptEntry(irq).handler = nullptr; @@ -326,8 +326,8 @@ namespace ams::kern::arch::arm64 { auto &entry = this->GetLocalInterruptEntry(irq); R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState()); - this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); - this->interrupt_controller.Disable(irq); + m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); + m_interrupt_controller.Disable(irq); entry.handler = nullptr; @@ -345,7 +345,7 @@ namespace ams::kern::arch::arm64 { /* Clear and enable. */ entry.needs_clear = false; - this->interrupt_controller.Enable(irq); + m_interrupt_controller.Enable(irq); return ResultSuccess(); } @@ -360,7 +360,7 @@ namespace ams::kern::arch::arm64 { /* Clear and set priority.
*/ entry.needs_clear = false; - this->interrupt_controller.SetPriorityLevel(irq, entry.priority); + m_interrupt_controller.SetPriorityLevel(irq, entry.priority); return ResultSuccess(); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index a79eaa1cd..188475ec0 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -21,13 +21,13 @@ namespace ams::kern::arch::arm64 { class AlignedMemoryBlock { private: - uintptr_t before_start; - uintptr_t before_end; - uintptr_t after_start; - uintptr_t after_end; - size_t current_alignment; + uintptr_t m_before_start; + uintptr_t m_before_end; + uintptr_t m_after_start; + uintptr_t m_after_end; + size_t m_current_alignment; public: - constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : before_start(0), before_end(0), after_start(0), after_end(0), current_alignment(0) { + constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : m_before_start(0), m_before_end(0), m_after_start(0), m_after_end(0), m_current_alignment(0) { MESOSPHERE_ASSERT(util::IsAligned(start, PageSize)); MESOSPHERE_ASSERT(num_pages > 0); @@ -38,41 +38,41 @@ namespace ams::kern::arch::arm64 { alignment = KPageTable::GetSmallerAlignment(alignment * PageSize) / PageSize; } - this->before_start = start_page; - this->before_end = util::AlignUp(start_page, alignment); - this->after_start = this->before_end; - this->after_end = start_page + num_pages; - this->current_alignment = alignment; - MESOSPHERE_ASSERT(this->current_alignment > 0); + m_before_start = start_page; + m_before_end = util::AlignUp(start_page, alignment); + m_after_start = m_before_end; + m_after_end = start_page + num_pages; + m_current_alignment = alignment; + MESOSPHERE_ASSERT(m_current_alignment > 0); } constexpr void SetAlignment(size_t alignment) { /* We can only ever decrease the granularity. */ - MESOSPHERE_ASSERT(this->current_alignment >= alignment / PageSize); - this->current_alignment = alignment / PageSize; + MESOSPHERE_ASSERT(m_current_alignment >= alignment / PageSize); + m_current_alignment = alignment / PageSize; } constexpr size_t GetAlignment() const { - return this->current_alignment * PageSize; + return m_current_alignment * PageSize; } constexpr void FindBlock(uintptr_t &out, size_t &num_pages) { - if ((this->after_end - this->after_start) >= this->current_alignment) { + if ((m_after_end - m_after_start) >= m_current_alignment) { /* Select aligned memory from after block. */ - const size_t available_pages = util::AlignDown(this->after_end, this->current_alignment) - this->after_start; + const size_t available_pages = util::AlignDown(m_after_end, m_current_alignment) - m_after_start; if (num_pages == 0 || available_pages < num_pages) { num_pages = available_pages; } - out = this->after_start * PageSize; - this->after_start += num_pages; - } else if ((this->before_end - this->before_start) >= this->current_alignment) { + out = m_after_start * PageSize; + m_after_start += num_pages; + } else if ((m_before_end - m_before_start) >= m_current_alignment) { /* Select aligned memory from before block. 
*/ - const size_t available_pages = this->before_end - util::AlignUp(this->before_start, this->current_alignment); + const size_t available_pages = m_before_end - util::AlignUp(m_before_start, m_current_alignment); if (num_pages == 0 || available_pages < num_pages) { num_pages = available_pages; } - this->before_end -= num_pages; - out = this->before_end * PageSize; + m_before_end -= num_pages; + out = m_before_end * PageSize; } else { /* Neither after or before can get an aligned bit of memory. */ out = 0; @@ -95,32 +95,32 @@ namespace ams::kern::arch::arm64 { static constexpr size_t NumWords = AsidCount / BitsPerWord; static constexpr WordType FullWord = ~WordType(0u); private: - WordType state[NumWords]; - KLightLock lock; - u8 hint; + WordType m_state[NumWords]; + KLightLock m_lock; + u8 m_hint; private: constexpr bool TestImpl(u8 asid) const { - return this->state[asid / BitsPerWord] & (1u << (asid % BitsPerWord)); + return m_state[asid / BitsPerWord] & (1u << (asid % BitsPerWord)); } constexpr void ReserveImpl(u8 asid) { MESOSPHERE_ASSERT(!this->TestImpl(asid)); - this->state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord)); + m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord)); } constexpr void ReleaseImpl(u8 asid) { MESOSPHERE_ASSERT(this->TestImpl(asid)); - this->state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord)); + m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord)); } constexpr u8 FindAvailable() const { - for (size_t i = 0; i < util::size(this->state); i++) { - if (this->state[i] == FullWord) { + for (size_t i = 0; i < util::size(m_state); i++) { + if (m_state[i] == FullWord) { continue; } - const WordType clear_bit = (this->state[i] + 1) ^ (this->state[i]); + const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]); return BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit); } - if (this->state[util::size(this->state)-1] == FullWord) { + if (m_state[util::size(m_state)-1] == FullWord) { MESOSPHERE_PANIC("Unable to reserve ASID"); } __builtin_unreachable(); @@ -130,26 +130,26 @@ namespace ams::kern::arch::arm64 { return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType)); } public: - constexpr KPageTableAsidManager() : state(), lock(), hint() { + constexpr KPageTableAsidManager() : m_state(), m_lock(), m_hint() { for (size_t i = 0; i < NumReservedAsids; i++) { this->ReserveImpl(ReservedAsids[i]); } } u8 Reserve() { - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - if (this->TestImpl(this->hint)) { - this->hint = this->FindAvailable(); + if (this->TestImpl(m_hint)) { + m_hint = this->FindAvailable(); } - this->ReserveImpl(this->hint); + this->ReserveImpl(m_hint); - return this->hint++; + return m_hint++; } void Release(u8 asid) { - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); this->ReleaseImpl(asid); } }; @@ -165,15 +165,15 @@ namespace ams::kern::arch::arm64 { Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) { /* Initialize basic fields. */ - this->asid = 0; - this->manager = std::addressof(Kernel::GetPageTableManager()); + m_asid = 0; + m_manager = std::addressof(Kernel::GetPageTableManager()); /* Allocate a page for ttbr. 
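[Reviewer note: the FindAvailable scan above leans on a classic bitmap identity: for a word w that is not all-ones, (w + 1) ^ w has its highest set bit exactly at w's lowest clear bit, so a count-leading-zeros yields the first free ASID in the word. A self-contained sketch, using 32-bit words for brevity rather than the kernel's WordType.]

    #include <cstdint>
    #include <cstdio>

    /* Callers must rule out w == all-ones first (the code above checks
       against FullWord and panics), since then there is no clear bit. */
    int LowestClearBit(uint32_t w) {
        const uint32_t mask = (w + 1u) ^ w;   /* ...0111 -> sets bits 0..3 */
        return 31 - __builtin_clz(mask);      /* index of the highest set bit */
    }

    int main() {
        std::printf("%d\n", LowestClearBit(0b0111));   /* 3: bits 0..2 taken */
        std::printf("%d\n", LowestClearBit(0b1011));   /* 2: bit 2 is first free */
    }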
*/ - const u64 asid_tag = (static_cast(this->asid) << 48ul); - const KVirtualAddress page = this->manager->Allocate(); + const u64 asid_tag = (static_cast(m_asid) << 48ul); + const KVirtualAddress page = m_manager->Allocate(); MESOSPHERE_ASSERT(page != Null); cpu::ClearPageToZero(GetVoidPointer(page)); - this->ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag; + m_ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag; /* Initialize the base page table. */ MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end)); @@ -186,17 +186,17 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_UNUSED(id); /* Get an ASID */ - this->asid = g_asid_manager.Reserve(); - auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(this->asid); }; + m_asid = g_asid_manager.Reserve(); + auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(m_asid); }; /* Set our manager. */ - this->manager = pt_manager; + m_manager = pt_manager; /* Allocate a new table, and set our ttbr value. */ - const KVirtualAddress new_table = this->manager->Allocate(); + const KVirtualAddress new_table = m_manager->Allocate(); R_UNLESS(new_table != Null, svc::ResultOutOfResource()); - this->ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), asid); - auto table_guard = SCOPE_GUARD { this->manager->Free(new_table); }; + m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid); + auto table_guard = SCOPE_GUARD { m_manager->Free(new_table); }; /* Initialize our base table. */ const size_t as_width = GetAddressSpaceWidth(as_type); @@ -308,7 +308,7 @@ namespace ams::kern::arch::arm64 { } /* Release our asid. */ - g_asid_manager.Release(this->asid); + g_asid_manager.Release(m_asid); return ResultSuccess(); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp index 49e857a2a..e77efcd91 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -18,19 +18,19 @@ namespace ams::kern::arch::arm64 { void KPageTableImpl::InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end) { - this->table = static_cast(tb); - this->is_kernel = true; - this->num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; + m_table = static_cast(tb); + m_is_kernel = true; + m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; } void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) { - this->table = static_cast(tb); - this->is_kernel = false; - this->num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; + m_table = static_cast(tb); + m_is_kernel = false; + m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; } L1PageTableEntry *KPageTableImpl::Finalize() { - return this->table; + return m_table; } bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const { @@ -119,21 +119,21 @@ namespace ams::kern::arch::arm64 { out_entry->phys_addr = Null; out_entry->block_size = L1BlockSize; out_entry->sw_reserved_bits = 0; - out_context->l1_entry = this->table + this->num_entries; + out_context->l1_entry = m_table + m_num_entries; out_context->l2_entry = nullptr; out_context->l3_entry = nullptr; /* Validate that we can read the actual entry. 
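[Reviewer note: the asid_tag construction above mirrors the architectural TTBRn_EL1 layout: translation-table base address in the low bits, ASID tagged into bits [63:48]. A hedged sketch of the same packing; EncodeTtbr here is a toy, and only the shift distance comes from the diff.]

    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t EncodeTtbr(uint64_t table_phys, uint8_t asid) {
        /* Base in the low bits, 8-bit ASID into the [63:48] field. */
        return table_phys | (static_cast<uint64_t>(asid) << 48);
    }

    int main() {
        const uint64_t ttbr = EncodeTtbr(0x80012000ull, 0x2A);
        std::printf("ttbr=%016llx asid=%llu\n",
                    static_cast<unsigned long long>(ttbr),
                    static_cast<unsigned long long>((ttbr >> 48) & 0xFF));
    }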
*/ const size_t l0_index = GetL0Index(address); const size_t l1_index = GetL1Index(address); - if (this->is_kernel) { + if (m_is_kernel) { /* Kernel entries must be accessed via TTBR1. */ - if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) { + if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) { return false; } } else { /* User entries must be accessed with TTBR0. */ - if ((l0_index != 0) || l1_index >= this->num_entries) { + if ((l0_index != 0) || l1_index >= m_num_entries) { return false; } } @@ -212,15 +212,15 @@ namespace ams::kern::arch::arm64 { } } else { /* We need to update the l1 entry. */ - const size_t l1_index = context->l1_entry - this->table; - if (l1_index < this->num_entries) { + const size_t l1_index = context->l1_entry - m_table; + if (l1_index < m_num_entries) { valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null); } else { /* Invalid, end traversal. */ out_entry->phys_addr = Null; out_entry->block_size = L1BlockSize; out_entry->sw_reserved_bits = 0; - context->l1_entry = this->table + this->num_entries; + context->l1_entry = m_table + m_num_entries; context->l2_entry = nullptr; context->l3_entry = nullptr; return false; @@ -262,14 +262,14 @@ namespace ams::kern::arch::arm64 { /* Validate that we can read the actual entry. */ const size_t l0_index = GetL0Index(address); const size_t l1_index = GetL1Index(address); - if (this->is_kernel) { + if (m_is_kernel) { /* Kernel entries must be accessed via TTBR1. */ - if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) { + if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) { return false; } } else { /* User entries must be accessed with TTBR0. */ - if ((l0_index != 0) || l1_index >= this->num_entries) { + if ((l0_index != 0) || l1_index >= m_num_entries) { return false; } } @@ -322,14 +322,14 @@ namespace ams::kern::arch::arm64 { /* Validate that we can read the actual entry. */ const size_t l0_index = GetL0Index(cur); const size_t l1_index = GetL1Index(cur); - if (this->is_kernel) { + if (m_is_kernel) { /* Kernel entries must be accessed via TTBR1. */ - if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) { + if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) { return; } } else { /* User entries must be accessed with TTBR0. 
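[Reviewer note: the TTBR0/TTBR1 index checks repeated through this file follow from the usual 4KiB-granule address split (L0 index in VA bits [47:39], L1 in [38:30], 512 entries per level). Kernel mappings live at the very top of the address space, so only the last L0 slot and the last num_entries L1 slots are legal; user mappings are the mirror image. An illustrative sketch; the constants are assumptions, not taken from the headers.]

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::size_t MaxEntries = 512;   /* assumed: 4KiB granule */

    constexpr std::size_t L0Index(uint64_t va) { return (va >> 39) & (MaxEntries - 1); }
    constexpr std::size_t L1Index(uint64_t va) { return (va >> 30) & (MaxEntries - 1); }

    bool IsIndexValid(uint64_t va, bool is_kernel, std::size_t num_entries) {
        if (is_kernel) {
            /* Kernel entries hang off the end of TTBR1's range. */
            return L0Index(va) == MaxEntries - 1 && L1Index(va) >= MaxEntries - num_entries;
        }
        /* User entries start at the bottom of TTBR0's range. */
        return L0Index(va) == 0 && L1Index(va) < num_entries;
    }

    int main() {
        /* kernel_vaddr_start used elsewhere in this diff; 512 L1 entries
           cover its 512GiB region. */
        std::printf("%d\n", IsIndexValid(0xFFFFFF8000000000ull, true, 512));
    }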
*/ - if ((l0_index != 0) || l1_index >= this->num_entries) { + if ((l0_index != 0) || l1_index >= m_num_entries) { return; } } @@ -482,8 +482,8 @@ namespace ams::kern::arch::arm64 { #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING) { ++num_tables; - for (size_t l1_index = 0; l1_index < this->num_entries; ++l1_index) { - auto &l1_entry = this->table[l1_index]; + for (size_t l1_index = 0; l1_index < m_num_entries; ++l1_index) { + auto &l1_entry = m_table[l1_index]; if (l1_entry.IsTable()) { ++num_tables; for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) { diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp index 17d808e64..aec4229e4 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp @@ -19,7 +19,7 @@ namespace ams::kern::arch::arm64 { void KSupervisorPageTable::Initialize(s32 core_id) { /* Get the identity mapping ttbr0. */ - this->ttbr0_identity[core_id] = cpu::GetTtbr0El1(); + m_ttbr0_identity[core_id] = cpu::GetTtbr0El1(); /* Set sctlr_el1 */ cpu::SystemControlRegisterAccessor().SetWxn(true).Store(); @@ -35,7 +35,7 @@ namespace ams::kern::arch::arm64 { const u64 kernel_vaddr_start = 0xFFFFFF8000000000ul; const u64 kernel_vaddr_end = 0xFFFFFFFFFFE00000ul; void *table = GetVoidPointer(KPageTableBase::GetLinearMappedVirtualAddress(ttbr1)); - this->page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end); + m_page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end); } } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp index 04035e30d..f507c8a9f 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp @@ -117,38 +117,38 @@ namespace ams::kern::arch::arm64 { /* Determine LR and SP. */ if (is_user) { /* Usermode thread. */ - this->lr = reinterpret_cast(::ams::kern::arch::arm64::UserModeThreadStarter); - this->sp = SetupStackForUserModeThreadStarter(u_pc, k_sp, u_sp, arg, is_64_bit); + m_lr = reinterpret_cast(::ams::kern::arch::arm64::UserModeThreadStarter); + m_sp = SetupStackForUserModeThreadStarter(u_pc, k_sp, u_sp, arg, is_64_bit); } else { /* Kernel thread. */ MESOSPHERE_ASSERT(is_64_bit); if (is_main) { /* Main thread. */ - this->lr = GetInteger(u_pc); - this->sp = GetInteger(k_sp); + m_lr = GetInteger(u_pc); + m_sp = GetInteger(k_sp); } else { /* Generic Kernel thread. */ - this->lr = reinterpret_cast(::ams::kern::arch::arm64::SupervisorModeThreadStarter); - this->sp = SetupStackForSupervisorModeThreadStarter(u_pc, k_sp, arg); + m_lr = reinterpret_cast(::ams::kern::arch::arm64::SupervisorModeThreadStarter); + m_sp = SetupStackForSupervisorModeThreadStarter(u_pc, k_sp, arg); } } /* Clear callee-saved registers. */ - for (size_t i = 0; i < util::size(this->callee_saved.registers); i++) { - this->callee_saved.registers[i] = 0; + for (size_t i = 0; i < util::size(m_callee_saved.registers); i++) { + m_callee_saved.registers[i] = 0; } /* Clear FPU state. 
*/ - this->fpcr = 0; - this->fpsr = 0; - this->cpacr = 0; - for (size_t i = 0; i < util::size(this->fpu_registers); i++) { - this->fpu_registers[i] = 0; + m_fpcr = 0; + m_fpsr = 0; + m_cpacr = 0; + for (size_t i = 0; i < util::size(m_fpu_registers); i++) { + m_fpu_registers[i] = 0; } /* Lock the context, if we're a main thread. */ - this->locked = is_main; + m_locked = is_main; return ResultSuccess(); } @@ -159,7 +159,7 @@ namespace ams::kern::arch::arm64 { } void KThreadContext::SetArguments(uintptr_t arg0, uintptr_t arg1) { - u64 *stack = reinterpret_cast(this->sp); + u64 *stack = reinterpret_cast(m_sp); stack[0] = arg0; stack[1] = arg1; } @@ -199,11 +199,11 @@ namespace ams::kern::arch::arm64 { void KThreadContext::SetFpuRegisters(const u128 *v, bool is_64_bit) { if (is_64_bit) { for (size_t i = 0; i < KThreadContext::NumFpuRegisters; ++i) { - this->fpu_registers[i] = v[i]; + m_fpu_registers[i] = v[i]; } } else { for (size_t i = 0; i < KThreadContext::NumFpuRegisters / 2; ++i) { - this->fpu_registers[i] = v[i]; + m_fpu_registers[i] = v[i]; } } } diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp index 0557bb305..3ca4b4c8b 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp @@ -210,10 +210,10 @@ namespace ams::kern::board::nintendo::nx { Bit_Readable = 31, }; private: - u32 value; + u32 m_value; protected: constexpr ALWAYS_INLINE u32 SelectBit(Bit n) const { - return (this->value & (1u << n)); + return (m_value & (1u << n)); } constexpr ALWAYS_INLINE bool GetBit(Bit n) const { @@ -231,7 +231,7 @@ namespace ams::kern::board::nintendo::nx { ALWAYS_INLINE void SetValue(u32 v) { /* Prevent re-ordering around entry modifications. 
*/ __asm__ __volatile__("" ::: "memory"); - this->value = v; + m_value = v; __asm__ __volatile__("" ::: "memory"); } public: @@ -246,7 +246,7 @@ namespace ams::kern::board::nintendo::nx { constexpr ALWAYS_INLINE u32 GetAttributes() const { return this->SelectBit(Bit_NonSecure) | this->SelectBit(Bit_Writeable) | this->SelectBit(Bit_Readable); } - constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { return (static_cast(this->value) << DevicePageBits) & PhysicalAddressMask; } + constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { return (static_cast(m_value) << DevicePageBits) & PhysicalAddressMask; } ALWAYS_INLINE void Invalidate() { this->SetValue(0); } }; @@ -286,36 +286,36 @@ namespace ams::kern::board::nintendo::nx { static constexpr size_t NumWords = AsidCount / BitsPerWord; static constexpr WordType FullWord = ~WordType(0u); private: - WordType state[NumWords]; - KLightLock lock; + WordType m_state[NumWords]; + KLightLock m_lock; private: constexpr void ReserveImpl(u8 asid) { - this->state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord)); + m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord)); } constexpr void ReleaseImpl(u8 asid) { - this->state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord)); + m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord)); } static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) { return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType)); } public: - constexpr KDeviceAsidManager() : state(), lock() { + constexpr KDeviceAsidManager() : m_state(), m_lock() { for (size_t i = 0; i < NumReservedAsids; i++) { this->ReserveImpl(ReservedAsids[i]); } } Result Reserve(u8 *out, size_t num_desired) { - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); MESOSPHERE_ASSERT(num_desired > 0); size_t num_reserved = 0; for (size_t i = 0; i < NumWords; i++) { - while (this->state[i] != FullWord) { - const WordType clear_bit = (this->state[i] + 1) ^ (this->state[i]); - this->state[i] |= clear_bit; + while (m_state[i] != FullWord) { + const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]); + m_state[i] |= clear_bit; out[num_reserved++] = static_cast(BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit)); R_SUCCEED_IF(num_reserved == num_desired); } @@ -329,7 +329,7 @@ namespace ams::kern::board::nintendo::nx { } void Release(u8 asid) { - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); this->ReleaseImpl(asid); } }; @@ -776,14 +776,14 @@ namespace ams::kern::board::nintendo::nx { /* Clear the tables. */ static_assert(TableCount == (1ul << DeviceVirtualAddressBits) / DeviceRegionSize); for (size_t i = 0; i < TableCount; ++i) { - this->tables[i] = Null; + m_tables[i] = Null; } /* Ensure that we clean up the tables on failure. */ auto table_guard = SCOPE_GUARD { for (size_t i = start_index; i <= end_index; ++i) { - if (this->tables[i] != Null && ptm.Close(this->tables[i], 1)) { - ptm.Free(this->tables[i]); + if (m_tables[i] != Null && ptm.Close(m_tables[i], 1)) { + ptm.Free(m_tables[i]); } } }; @@ -797,32 +797,32 @@ namespace ams::kern::board::nintendo::nx { ptm.Open(table_vaddr, 1); cpu::StoreDataCache(GetVoidPointer(table_vaddr), PageDirectorySize); - this->tables[i] = table_vaddr; + m_tables[i] = table_vaddr; } /* Clear asids. */ for (size_t i = 0; i < TableCount; ++i) { - this->table_asids[i] = g_reserved_asid; + m_table_asids[i] = g_reserved_asid; } /* Reserve asids for the tables. 
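[Reviewer note: on the SetValue barriers above, an empty asm statement with a "memory" clobber is a compiler-only fence: it forbids the compiler from caching, folding, or reordering memory accesses across it, without emitting any hardware barrier instruction. A toy illustration; the volatile global stands in for a page-directory entry that the SMMU walks concurrently.]

    #include <cstdint>

    volatile uint32_t g_entry;   /* stand-in for a page-directory entry */

    inline void SetValue(volatile uint32_t &entry, uint32_t v) {
        __asm__ __volatile__("" ::: "memory");   /* earlier accesses stay earlier */
        entry = v;
        __asm__ __volatile__("" ::: "memory");   /* later accesses stay later */
    }

    int main() {
        SetValue(g_entry, 0x80000001u);
        return 0;
    }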
*/ - R_TRY(g_asid_manager.Reserve(std::addressof(this->table_asids[start_index]), end_index - start_index + 1)); + R_TRY(g_asid_manager.Reserve(std::addressof(m_table_asids[start_index]), end_index - start_index + 1)); /* Associate tables with asids. */ for (size_t i = start_index; i <= end_index; ++i) { - SetTable(this->table_asids[i], GetPageTablePhysicalAddress(this->tables[i])); + SetTable(m_table_asids[i], GetPageTablePhysicalAddress(m_tables[i])); } /* Set member variables. */ - this->attached_device = 0; - this->attached_value = (1u << 31) | this->table_asids[0]; - this->detached_value = (1u << 31) | g_reserved_asid; + m_attached_device = 0; + m_attached_value = (1u << 31) | m_table_asids[0]; + m_detached_value = (1u << 31) | g_reserved_asid; - this->hs_attached_value = (1u << 31); - this->hs_detached_value = (1u << 31); + m_hs_attached_value = (1u << 31); + m_hs_detached_value = (1u << 31); for (size_t i = 0; i < TableCount; ++i) { - this->hs_attached_value |= (this->table_asids[i] << (i * BITSIZEOF(u8))); - this->hs_detached_value |= (g_reserved_asid << (i * BITSIZEOF(u8))); + m_hs_attached_value |= (m_table_asids[i] << (i * BITSIZEOF(u8))); + m_hs_detached_value |= (g_reserved_asid << (i * BITSIZEOF(u8))); } /* We succeeded. */ @@ -839,8 +839,8 @@ namespace ams::kern::board::nintendo::nx { KScopedLightLock lk(g_lock); for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) { const auto device_name = static_cast(i); - if ((this->attached_device & (1ul << device_name)) != 0) { - WriteMcRegister(GetDeviceAsidRegisterOffset(device_name), IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value); + if ((m_attached_device & (1ul << device_name)) != 0) { + WriteMcRegister(GetDeviceAsidRegisterOffset(device_name), IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value); SmmuSynchronizationBarrier(); } } @@ -851,12 +851,12 @@ namespace ams::kern::board::nintendo::nx { /* Release all asids. */ for (size_t i = 0; i < TableCount; ++i) { - if (this->table_asids[i] != g_reserved_asid) { + if (m_table_asids[i] != g_reserved_asid) { /* Set the table to the reserved table. */ - SetTable(this->table_asids[i], g_reserved_table_phys_addr); + SetTable(m_table_asids[i], g_reserved_table_phys_addr); /* Close the table. */ - const KVirtualAddress table_vaddr = this->tables[i]; + const KVirtualAddress table_vaddr = m_tables[i]; MESOSPHERE_ASSERT(ptm.GetRefCount(table_vaddr) == 1); MESOSPHERE_ABORT_UNLESS(ptm.Close(table_vaddr, 1)); @@ -864,7 +864,7 @@ namespace ams::kern::board::nintendo::nx { ptm.Free(table_vaddr); /* Release the asid. */ - g_asid_manager.Release(this->table_asids[i]); + g_asid_manager.Release(m_table_asids[i]); } } } @@ -875,7 +875,7 @@ namespace ams::kern::board::nintendo::nx { R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound()); /* Check that the device isn't already attached. */ - R_UNLESS((this->attached_device & (1ul << device_name)) == 0, svc::ResultBusy()); + R_UNLESS((m_attached_device & (1ul << device_name)) == 0, svc::ResultBusy()); /* Validate that the space is allowed for the device. */ const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize; @@ -889,8 +889,8 @@ namespace ams::kern::board::nintendo::nx { R_UNLESS(reg_offset >= 0, svc::ResultNotFound()); /* Determine the old/new values. */ - const u32 old_val = IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value; - const u32 new_val = IsHsSupported(device_name) ? 
this->hs_attached_value : this->attached_value; + const u32 old_val = IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value; + const u32 new_val = IsHsSupported(device_name) ? m_hs_attached_value : m_attached_value; /* Attach the device. */ { @@ -912,7 +912,7 @@ namespace ams::kern::board::nintendo::nx { } /* Mark the device as attached. */ - this->attached_device |= (1ul << device_name); + m_attached_device |= (1ul << device_name); return ResultSuccess(); } @@ -923,15 +923,15 @@ namespace ams::kern::board::nintendo::nx { R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound()); /* Check that the device is already attached. */ - R_UNLESS((this->attached_device & (1ul << device_name)) != 0, svc::ResultInvalidState()); + R_UNLESS((m_attached_device & (1ul << device_name)) != 0, svc::ResultInvalidState()); /* Get the device asid register offset. */ const int reg_offset = GetDeviceAsidRegisterOffset(device_name); R_UNLESS(reg_offset >= 0, svc::ResultNotFound()); /* Determine the old/new values. */ - const u32 old_val = IsHsSupported(device_name) ? this->hs_attached_value : this->attached_value; - const u32 new_val = IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value; + const u32 old_val = IsHsSupported(device_name) ? m_hs_attached_value : m_attached_value; + const u32 new_val = IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value; /* When not building for debug, the old value might be unused. */ AMS_UNUSED(old_val); @@ -952,7 +952,7 @@ namespace ams::kern::board::nintendo::nx { } /* Mark the device as detached. */ - this->attached_device &= ~(1ul << device_name); + m_attached_device &= ~(1ul << device_name); return ResultSuccess(); } @@ -968,7 +968,7 @@ namespace ams::kern::board::nintendo::nx { const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize; const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; - const PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + const PageDirectoryEntry *l1 = GetPointer(m_tables[l0_index]); if (l1 == nullptr || !l1[l1_index].IsValid()) { const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index; const size_t map_count = std::min(remaining_in_entry, remaining / DevicePageSize); @@ -1023,7 +1023,7 @@ namespace ams::kern::board::nintendo::nx { const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; /* Get and validate l1. */ - PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + PageDirectoryEntry *l1 = GetPointer(m_tables[l0_index]); MESOSPHERE_ASSERT(l1 != nullptr); /* Setup an l1 table/entry, if needed. */ @@ -1039,7 +1039,7 @@ namespace ams::kern::board::nintendo::nx { /* Synchronize. */ InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); - InvalidateTlbSection(this->table_asids[l0_index], address); + InvalidateTlbSection(m_table_asids[l0_index], address); SmmuSynchronizationBarrier(); /* Open references to the pages. */ @@ -1066,7 +1066,7 @@ namespace ams::kern::board::nintendo::nx { /* Synchronize. */ InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); - InvalidateTlbSection(this->table_asids[l0_index], address); + InvalidateTlbSection(m_table_asids[l0_index], address); SmmuSynchronizationBarrier(); /* Increment the page table count. */ @@ -1100,7 +1100,7 @@ namespace ams::kern::board::nintendo::nx { } /* Synchronize. 
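[Reviewer note: the attach/detach paths above just swap one of two precomputed register values and keep a bitmask of attached devices for Finalize/Detach to restore later. A condensed model; all names here are invented, and the real code also brackets the register write with SmmuSynchronizationBarrier.]

    #include <cstdint>
    #include <cstdio>

    constexpr int DeviceCount = 64;
    uint32_t g_asid_regs[DeviceCount];   /* stand-in for the MC registers */

    struct AddressSpace {
        uint64_t attached_device = 0;
        uint32_t attached_value  = (1u << 31) | 0x2A;   /* enable bit | asid */
        uint32_t detached_value  = (1u << 31) | 0x00;   /* enable bit | reserved asid */

        bool Attach(int device) {
            if (attached_device & (1ull << device)) {
                return false;   /* would be svc::ResultBusy in the real code */
            }
            g_asid_regs[device] = attached_value;
            attached_device |= (1ull << device);
            return true;
        }
    };

    int main() {
        AddressSpace as;
        const bool first  = as.Attach(7);
        const bool second = as.Attach(7);
        std::printf("%d %d\n", first, second);   /* 1 0 */
    }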
*/ - InvalidateTlbSection(this->table_asids[l0_index], address); + InvalidateTlbSection(m_table_asids[l0_index], address); SmmuSynchronizationBarrier(); /* Open references to the pages. */ @@ -1181,7 +1181,7 @@ namespace ams::kern::board::nintendo::nx { const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; /* Get and validate l1. */ - PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + PageDirectoryEntry *l1 = GetPointer(m_tables[l0_index]); /* Check if there's nothing mapped at l1. */ if (l1 == nullptr || !l1[l1_index].IsValid()) { @@ -1242,7 +1242,7 @@ namespace ams::kern::board::nintendo::nx { /* Synchronize. */ InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); - InvalidateTlbSection(this->table_asids[l0_index], address); + InvalidateTlbSection(m_table_asids[l0_index], address); SmmuSynchronizationBarrier(); /* We invalidated the tlb. */ @@ -1254,7 +1254,7 @@ namespace ams::kern::board::nintendo::nx { /* Invalidate the tlb if we haven't already. */ if (!invalidated_tlb) { - InvalidateTlbSection(this->table_asids[l0_index], address); + InvalidateTlbSection(m_table_asids[l0_index], address); SmmuSynchronizationBarrier(); } @@ -1275,7 +1275,7 @@ namespace ams::kern::board::nintendo::nx { /* Synchronize. */ InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); - InvalidateTlbSection(this->table_asids[l0_index], address); + InvalidateTlbSection(m_table_asids[l0_index], address); SmmuSynchronizationBarrier(); /* Close references. */ @@ -1305,7 +1305,7 @@ namespace ams::kern::board::nintendo::nx { const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; /* Get and validate l1. */ - const PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + const PageDirectoryEntry *l1 = GetPointer(m_tables[l0_index]); R_UNLESS(l1 != nullptr, svc::ResultInvalidCurrentMemory()); R_UNLESS(l1[l1_index].IsValid(), svc::ResultInvalidCurrentMemory()); diff --git a/libraries/libmesosphere/source/kern_k_address_arbiter.cpp b/libraries/libmesosphere/source/kern_k_address_arbiter.cpp index c223ff95f..fa554ea25 100644 --- a/libraries/libmesosphere/source/kern_k_address_arbiter.cpp +++ b/libraries/libmesosphere/source/kern_k_address_arbiter.cpp @@ -51,15 +51,15 @@ namespace ams::kern { { KScopedSchedulerLock sl; - auto it = this->tree.nfind_light({ addr, -1 }); - while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + auto it = m_tree.nfind_light({ addr, -1 }); + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { KThread *target_thread = std::addressof(*it); target_thread->SetSyncedObject(nullptr, ResultSuccess()); AMS_ASSERT(target_thread->IsWaitingForAddressArbiter()); target_thread->Wakeup(); - it = this->tree.erase(it); + it = m_tree.erase(it); target_thread->ClearAddressArbiter(); ++num_waiters; } @@ -78,15 +78,15 @@ namespace ams::kern { R_UNLESS(UpdateIfEqual(std::addressof(user_value), addr, value, value + 1), svc::ResultInvalidCurrentMemory()); R_UNLESS(user_value == value, svc::ResultInvalidState()); - auto it = this->tree.nfind_light({ addr, -1 }); - while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + auto it = m_tree.nfind_light({ addr, -1 }); + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { KThread *target_thread = std::addressof(*it); 
target_thread->SetSyncedObject(nullptr, ResultSuccess()); AMS_ASSERT(target_thread->IsWaitingForAddressArbiter()); target_thread->Wakeup(); - it = this->tree.erase(it); + it = m_tree.erase(it); target_thread->ClearAddressArbiter(); ++num_waiters; } @@ -100,21 +100,21 @@ namespace ams::kern { { KScopedSchedulerLock sl; - auto it = this->tree.nfind_light({ addr, -1 }); + auto it = m_tree.nfind_light({ addr, -1 }); /* Determine the updated value. */ s32 new_value; if (GetTargetFirmware() >= TargetFirmware_7_0_0) { if (count <= 0) { - if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) { + if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) { new_value = value - 2; } else { new_value = value + 1; } } else { - if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) { + if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) { auto tmp_it = it; s32 tmp_num_waiters = 0; - while ((++tmp_it != this->tree.end()) && (tmp_it->GetAddressArbiterKey() == addr)) { + while ((++tmp_it != m_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr)) { if ((tmp_num_waiters++) >= count) { break; } @@ -131,7 +131,7 @@ namespace ams::kern { } } else { if (count <= 0) { - if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) { + if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) { new_value = value - 1; } else { new_value = value + 1; @@ -139,7 +139,7 @@ namespace ams::kern { } else { auto tmp_it = it; s32 tmp_num_waiters = 0; - while ((tmp_it != this->tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) && (tmp_num_waiters < count + 1)) { + while ((tmp_it != m_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) && (tmp_num_waiters < count + 1)) { ++tmp_num_waiters; ++tmp_it; } @@ -166,14 +166,14 @@ namespace ams::kern { R_UNLESS(succeeded, svc::ResultInvalidCurrentMemory()); R_UNLESS(user_value == value, svc::ResultInvalidState()); - while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { KThread *target_thread = std::addressof(*it); target_thread->SetSyncedObject(nullptr, ResultSuccess()); AMS_ASSERT(target_thread->IsWaitingForAddressArbiter()); target_thread->Wakeup(); - it = this->tree.erase(it); + it = m_tree.erase(it); target_thread->ClearAddressArbiter(); ++num_waiters; } @@ -225,8 +225,8 @@ namespace ams::kern { } /* Set the arbiter. */ - cur_thread->SetAddressArbiter(std::addressof(this->tree), addr); - this->tree.insert(*cur_thread); + cur_thread->SetAddressArbiter(std::addressof(m_tree), addr); + m_tree.insert(*cur_thread); cur_thread->SetState(KThread::ThreadState_Waiting); } @@ -240,7 +240,7 @@ namespace ams::kern { KScopedSchedulerLock sl; if (cur_thread->IsWaitingForAddressArbiter()) { - this->tree.erase(this->tree.iterator_to(*cur_thread)); + m_tree.erase(m_tree.iterator_to(*cur_thread)); cur_thread->ClearAddressArbiter(); } } @@ -287,8 +287,8 @@ namespace ams::kern { } /* Set the arbiter. 
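[Reviewer note: the wake loops above all share one shape: waiters sit in an ordered tree keyed by address, and signaling walks the matching entries, wakes up to count of them (count <= 0 meaning "all"), erasing as it goes. A stand-in sketch; std::multimap substitutes for the kernel's intrusive tree, and lower_bound plays the role of nfind_light({addr, -1}).]

    #include <cstdint>
    #include <cstdio>
    #include <map>

    int SignalAddress(std::multimap<uintptr_t, int> &tree, uintptr_t addr, int count) {
        int num_waiters = 0;
        auto it = tree.lower_bound(addr);
        while (it != tree.end() && it->first == addr && (count <= 0 || num_waiters < count)) {
            std::printf("wake thread %d\n", it->second);
            it = tree.erase(it);   /* erase returns the next waiter */
            ++num_waiters;
        }
        return num_waiters;
    }

    int main() {
        std::multimap<uintptr_t, int> tree{{0x10, 1}, {0x10, 2}, {0x20, 3}};
        std::printf("woke %d\n", SignalAddress(tree, 0x10, -1));   /* woke 2 */
    }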
*/ - cur_thread->SetAddressArbiter(std::addressof(this->tree), addr); - this->tree.insert(*cur_thread); + cur_thread->SetAddressArbiter(std::addressof(m_tree), addr); + m_tree.insert(*cur_thread); cur_thread->SetState(KThread::ThreadState_Waiting); } @@ -302,7 +302,7 @@ namespace ams::kern { KScopedSchedulerLock sl; if (cur_thread->IsWaitingForAddressArbiter()) { - this->tree.erase(this->tree.iterator_to(*cur_thread)); + m_tree.erase(m_tree.iterator_to(*cur_thread)); cur_thread->ClearAddressArbiter(); } } diff --git a/libraries/libmesosphere/source/kern_k_auto_object.cpp b/libraries/libmesosphere/source/kern_k_auto_object.cpp index 5a023d5bb..9ce0e58b9 100644 --- a/libraries/libmesosphere/source/kern_k_auto_object.cpp +++ b/libraries/libmesosphere/source/kern_k_auto_object.cpp @@ -18,7 +18,7 @@ namespace ams::kern { KAutoObject *KAutoObject::Create(KAutoObject *obj) { - obj->ref_count = 1; + obj->m_ref_count = 1; return obj; } diff --git a/libraries/libmesosphere/source/kern_k_auto_object_container.cpp b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp index 5fbdacca9..3dcbc173d 100644 --- a/libraries/libmesosphere/source/kern_k_auto_object_container.cpp +++ b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp @@ -21,27 +21,27 @@ namespace ams::kern { void KAutoObjectWithListContainer::Register(KAutoObjectWithList *obj) { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - this->object_list.insert(*obj); + m_object_list.insert(*obj); } void KAutoObjectWithListContainer::Unregister(KAutoObjectWithList *obj) { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - this->object_list.erase(this->object_list.iterator_to(*obj)); + m_object_list.erase(m_object_list.iterator_to(*obj)); } size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess *owner) { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); size_t count = 0; - for (auto &obj : this->object_list) { + for (auto &obj : m_object_list) { if (obj.GetOwner() == owner) { count++; } diff --git a/libraries/libmesosphere/source/kern_k_capabilities.cpp b/libraries/libmesosphere/source/kern_k_capabilities.cpp index c7abfc4b7..a8d2c550e 100644 --- a/libraries/libmesosphere/source/kern_k_capabilities.cpp +++ b/libraries/libmesosphere/source/kern_k_capabilities.cpp @@ -22,15 +22,15 @@ namespace ams::kern { /* Most fields have already been cleared by our constructor. */ /* Initial processes may run on all cores. */ - this->core_mask = (1ul << cpu::NumCores) - 1; + m_core_mask = (1ul << cpu::NumCores) - 1; /* Initial processes may use any user priority they like. */ - this->priority_mask = ~0xFul; + m_priority_mask = ~0xFul; /* Here, Nintendo sets the kernel version to the current kernel version. */ /* We will follow suit and set the version to the highest supported kernel version. */ - this->intended_kernel_version.Set(ams::svc::SupportedKernelMajorVersion); - this->intended_kernel_version.Set(ams::svc::SupportedKernelMinorVersion); + m_intended_kernel_version.Set(ams::svc::SupportedKernelMajorVersion); + m_intended_kernel_version.Set(ams::svc::SupportedKernelMinorVersion); /* Parse the capabilities array. */ return this->SetCapabilities(caps, num_caps, page_table); @@ -46,8 +46,8 @@ namespace ams::kern { Result KCapabilities::SetCorePriorityCapability(const util::BitPack32 cap) { /* We can't set core/priority if we've already set them. 
*/ - R_UNLESS(this->core_mask == 0, svc::ResultInvalidArgument()); - R_UNLESS(this->priority_mask == 0, svc::ResultInvalidArgument()); + R_UNLESS(m_core_mask == 0, svc::ResultInvalidArgument()); + R_UNLESS(m_priority_mask == 0, svc::ResultInvalidArgument()); /* Validate the core/priority. */ const auto min_core = cap.Get(); @@ -64,18 +64,18 @@ namespace ams::kern { /* Set core mask. */ for (auto core_id = min_core; core_id <= max_core; core_id++) { - this->core_mask |= (1ul << core_id); + m_core_mask |= (1ul << core_id); } - MESOSPHERE_ASSERT((this->core_mask & ((1ul << cpu::NumCores) - 1)) == this->core_mask); + MESOSPHERE_ASSERT((m_core_mask & ((1ul << cpu::NumCores) - 1)) == m_core_mask); /* Set priority mask. */ for (auto prio = min_prio; prio <= max_prio; prio++) { - this->priority_mask |= (1ul << prio); + m_priority_mask |= (1ul << prio); } /* We must have some core/priority we can use. */ - R_UNLESS(this->core_mask != 0, svc::ResultInvalidArgument()); - R_UNLESS(this->priority_mask != 0, svc::ResultInvalidArgument()); + R_UNLESS(m_core_mask != 0, svc::ResultInvalidArgument()); + R_UNLESS(m_priority_mask != 0, svc::ResultInvalidArgument()); return ResultSuccess(); } @@ -186,17 +186,17 @@ namespace ams::kern { /* Validate. */ R_UNLESS(cap.Get() == 0, svc::ResultReservedUsed()); - this->program_type = cap.Get(); + m_program_type = cap.Get(); return ResultSuccess(); } Result KCapabilities::SetKernelVersionCapability(const util::BitPack32 cap) { /* Ensure we haven't set our version before. */ - R_UNLESS(this->intended_kernel_version.Get() == 0, svc::ResultInvalidArgument()); + R_UNLESS(m_intended_kernel_version.Get() == 0, svc::ResultInvalidArgument()); /* Set, ensure that we set a valid version. */ - this->intended_kernel_version = cap; - R_UNLESS(this->intended_kernel_version.Get() != 0, svc::ResultInvalidArgument()); + m_intended_kernel_version = cap; + R_UNLESS(m_intended_kernel_version.Get() != 0, svc::ResultInvalidArgument()); return ResultSuccess(); } @@ -205,7 +205,7 @@ namespace ams::kern { /* Validate. */ R_UNLESS(cap.Get() == 0, svc::ResultReservedUsed()); - this->handle_table_size = cap.Get(); + m_handle_table_size = cap.Get(); return ResultSuccess(); } @@ -213,8 +213,8 @@ namespace ams::kern { /* Validate. */ R_UNLESS(cap.Get() == 0, svc::ResultReservedUsed()); - this->debug_capabilities.Set(cap.Get()); - this->debug_capabilities.Set(cap.Get()); + m_debug_capabilities.Set(cap.Get()); + m_debug_capabilities.Set(cap.Get()); return ResultSuccess(); } diff --git a/libraries/libmesosphere/source/kern_k_client_port.cpp b/libraries/libmesosphere/source/kern_k_client_port.cpp index 97c023ce6..963a0bafd 100644 --- a/libraries/libmesosphere/source/kern_k_client_port.cpp +++ b/libraries/libmesosphere/source/kern_k_client_port.cpp @@ -19,17 +19,17 @@ namespace ams::kern { void KClientPort::Initialize(KPort *parent, s32 max_sessions) { /* Set member variables. */ - this->num_sessions = 0; - this->peak_sessions = 0; - this->parent = parent; - this->max_sessions = max_sessions; + m_num_sessions = 0; + m_peak_sessions = 0; + m_parent = parent; + m_max_sessions = max_sessions; } void KClientPort::OnSessionFinalized() { KScopedSchedulerLock sl; - const auto prev = this->num_sessions--; - if (prev == this->max_sessions) { + const auto prev = m_num_sessions--; + if (prev == m_max_sessions) { this->NotifyAvailable(); } } @@ -44,15 +44,15 @@ namespace ams::kern { void KClientPort::Destroy() { /* Note with our parent that we're closed. 
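[Reviewer note: SetCorePriorityCapability above expands two [min, max] ranges into bitmasks and then requires both masks to be non-empty. A minimal sketch of the expansion; the example range values are invented.]

    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t MaskFromRange(uint32_t min, uint32_t max) {
        uint64_t mask = 0;
        for (uint32_t i = min; i <= max; ++i) {
            mask |= (1ull << i);
        }
        return mask;
    }

    int main() {
        /* e.g. cores 0-2 usable, priorities 24-59 usable */
        std::printf("core_mask=%llx priority_mask=%llx\n",
                    static_cast<unsigned long long>(MaskFromRange(0, 2)),
                    static_cast<unsigned long long>(MaskFromRange(24, 59)));
    }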
*/ - this->parent->OnClientClosed(); + m_parent->OnClientClosed(); /* Close our reference to our parent. */ - this->parent->Close(); + m_parent->Close(); } bool KClientPort::IsSignaled() const { MESOSPHERE_ASSERT_THIS(); - return this->num_sessions < this->max_sessions; + return m_num_sessions < m_max_sessions; } Result KClientPort::CreateSession(KClientSession **out) { @@ -67,23 +67,23 @@ namespace ams::kern { /* Atomically increment the number of sessions. */ s32 new_sessions; { - const auto max = this->max_sessions; - auto cur_sessions = this->num_sessions.load(std::memory_order_acquire); + const auto max = m_max_sessions; + auto cur_sessions = m_num_sessions.load(std::memory_order_acquire); do { R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions()); new_sessions = cur_sessions + 1; - } while (!this->num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); + } while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); } /* Atomically update the peak session tracking. */ { - auto peak = this->peak_sessions.load(std::memory_order_acquire); + auto peak = m_peak_sessions.load(std::memory_order_acquire); do { if (peak >= new_sessions) { break; } - } while (!this->peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); + } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); } } @@ -91,8 +91,8 @@ namespace ams::kern { KSession *session = KSession::Create(); if (session == nullptr) { /* Decrement the session count. */ - const auto prev = this->num_sessions--; - if (prev == this->max_sessions) { + const auto prev = m_num_sessions--; + if (prev == m_max_sessions) { this->NotifyAvailable(); } @@ -100,7 +100,7 @@ namespace ams::kern { } /* Initialize the session. */ - session->Initialize(this, this->parent->GetName()); + session->Initialize(this, m_parent->GetName()); /* Commit the session reservation. */ session_reservation.Commit(); @@ -113,7 +113,7 @@ namespace ams::kern { }; /* Enqueue the session with our parent. */ - R_TRY(this->parent->EnqueueSession(std::addressof(session->GetServerSession()))); + R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession()))); /* We succeeded, so set the output. */ session_guard.Cancel(); @@ -133,23 +133,23 @@ namespace ams::kern { /* Atomically increment the number of sessions. */ s32 new_sessions; { - const auto max = this->max_sessions; - auto cur_sessions = this->num_sessions.load(std::memory_order_acquire); + const auto max = m_max_sessions; + auto cur_sessions = m_num_sessions.load(std::memory_order_acquire); do { R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions()); new_sessions = cur_sessions + 1; - } while (!this->num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); + } while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); } /* Atomically update the peak session tracking. */ { - auto peak = this->peak_sessions.load(std::memory_order_acquire); + auto peak = m_peak_sessions.load(std::memory_order_acquire); do { if (peak >= new_sessions) { break; } - } while (!this->peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); + } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); } } @@ -157,8 +157,8 @@ namespace ams::kern { KLightSession *session = KLightSession::Create(); if (session == nullptr) { /* Decrement the session count. 
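[Reviewer note: the session-count bookkeeping above is a lock-free bounded increment plus a monotonic maximum update, both via compare_exchange_weak. A distilled sketch of the two loops, with std::atomic<int> in place of the kernel's atomics.]

    #include <atomic>
    #include <cstdio>

    std::atomic<int> g_num_sessions{0};
    std::atomic<int> g_peak_sessions{0};

    /* Returns false once the limit is reached (svc::ResultOutOfSessions in
       the real code); otherwise increments the count and raises the peak. */
    bool OpenSession(int max_sessions) {
        int cur = g_num_sessions.load(std::memory_order_acquire);
        int next;
        do {
            if (cur >= max_sessions) {
                return false;
            }
            next = cur + 1;
        } while (!g_num_sessions.compare_exchange_weak(cur, next, std::memory_order_relaxed));

        int peak = g_peak_sessions.load(std::memory_order_acquire);
        while (peak < next &&
               !g_peak_sessions.compare_exchange_weak(peak, next, std::memory_order_relaxed)) {
            /* peak was reloaded by the failed CAS; recheck and retry */
        }
        return true;
    }

    int main() {
        for (int i = 0; i < 3; ++i) {
            std::printf("%d\n", OpenSession(2));   /* 1 1 0 */
        }
    }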
*/ - const auto prev = this->num_sessions--; - if (prev == this->max_sessions) { + const auto prev = m_num_sessions--; + if (prev == m_max_sessions) { this->NotifyAvailable(); } @@ -166,7 +166,7 @@ namespace ams::kern { } /* Initialize the session. */ - session->Initialize(this, this->parent->GetName()); + session->Initialize(this, m_parent->GetName()); /* Commit the session reservation. */ session_reservation.Commit(); @@ -179,7 +179,7 @@ namespace ams::kern { }; /* Enqueue the session with our parent. */ - R_TRY(this->parent->EnqueueSession(std::addressof(session->GetServerSession()))); + R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession()))); /* We succeeded, so set the output. */ session_guard.Cancel(); diff --git a/libraries/libmesosphere/source/kern_k_client_session.cpp b/libraries/libmesosphere/source/kern_k_client_session.cpp index 7f164a5c0..569d9ac29 100644 --- a/libraries/libmesosphere/source/kern_k_client_session.cpp +++ b/libraries/libmesosphere/source/kern_k_client_session.cpp @@ -20,8 +20,8 @@ namespace ams::kern { void KClientSession::Destroy() { MESOSPHERE_ASSERT_THIS(); - this->parent->OnClientClosed(); - this->parent->Close(); + m_parent->OnClientClosed(); + m_parent->Close(); } void KClientSession::OnServerClosed() { @@ -45,7 +45,7 @@ namespace ams::kern { GetCurrentThread().SetSyncedObject(nullptr, ResultSuccess()); - R_TRY(this->parent->OnRequest(request)); + R_TRY(m_parent->OnRequest(request)); } /* Get the result. */ @@ -68,7 +68,7 @@ namespace ams::kern { { KScopedSchedulerLock sl; - R_TRY(this->parent->OnRequest(request)); + R_TRY(m_parent->OnRequest(request)); } return ResultSuccess(); diff --git a/libraries/libmesosphere/source/kern_k_code_memory.cpp b/libraries/libmesosphere/source/kern_k_code_memory.cpp index 3dcd101b4..e1b027783 100644 --- a/libraries/libmesosphere/source/kern_k_code_memory.cpp +++ b/libraries/libmesosphere/source/kern_k_code_memory.cpp @@ -21,31 +21,31 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Set members. */ - this->owner = GetCurrentProcessPointer(); + m_owner = GetCurrentProcessPointer(); /* Initialize the page group. */ - auto &page_table = this->owner->GetPageTable(); - new (GetPointer(this->page_group)) KPageGroup(page_table.GetBlockInfoManager()); + auto &page_table = m_owner->GetPageTable(); + new (GetPointer(m_page_group)) KPageGroup(page_table.GetBlockInfoManager()); /* Ensure that our page group's state is valid on exit. */ - auto pg_guard = SCOPE_GUARD { GetReference(this->page_group).~KPageGroup(); }; + auto pg_guard = SCOPE_GUARD { GetReference(m_page_group).~KPageGroup(); }; /* Lock the memory. */ - R_TRY(page_table.LockForCodeMemory(GetPointer(this->page_group), addr, size)); + R_TRY(page_table.LockForCodeMemory(GetPointer(m_page_group), addr, size)); /* Clear the memory. */ - for (const auto &block : GetReference(this->page_group)) { + for (const auto &block : GetReference(m_page_group)) { /* Clear and store cache. */ std::memset(GetVoidPointer(block.GetAddress()), 0xFF, block.GetSize()); cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize()); } /* Set remaining tracking members. */ - this->owner->Open(); - this->address = addr; - this->is_initialized = true; - this->is_owner_mapped = false; - this->is_mapped = false; + m_owner->Open(); + m_address = addr; + m_is_initialized = true; + m_is_owner_mapped = false; + m_is_mapped = false; /* We succeeded. */ pg_guard.Cancel(); @@ -56,17 +56,17 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Unlock. 
*/ - if (!this->is_mapped && !this->is_owner_mapped) { - const size_t size = GetReference(this->page_group).GetNumPages() * PageSize; - MESOSPHERE_R_ABORT_UNLESS(this->owner->GetPageTable().UnlockForCodeMemory(this->address, size, GetReference(this->page_group))); + if (!m_is_mapped && !m_is_owner_mapped) { + const size_t size = GetReference(m_page_group).GetNumPages() * PageSize; + MESOSPHERE_R_ABORT_UNLESS(m_owner->GetPageTable().UnlockForCodeMemory(m_address, size, GetReference(m_page_group))); } /* Close the page group. */ - GetReference(this->page_group).Close(); - GetReference(this->page_group).Finalize(); + GetReference(m_page_group).Close(); + GetReference(m_page_group).Finalize(); /* Close our reference to our owner. */ - this->owner->Close(); + m_owner->Close(); /* Perform inherited finalization. */ KAutoObjectWithSlabHeapAndContainer::Finalize(); @@ -76,19 +76,19 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Validate the size. */ - R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Ensure we're not already mapped. */ - R_UNLESS(!this->is_mapped, svc::ResultInvalidState()); + R_UNLESS(!m_is_mapped, svc::ResultInvalidState()); /* Map the memory. */ - R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut, KMemoryPermission_UserReadWrite)); + R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(m_page_group), KMemoryState_CodeOut, KMemoryPermission_UserReadWrite)); /* Mark ourselves as mapped. */ - this->is_mapped = true; + m_is_mapped = true; return ResultSuccess(); } @@ -97,17 +97,17 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Validate the size. */ - R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Unmap the memory. */ - R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut)); + R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), KMemoryState_CodeOut)); /* Mark ourselves as unmapped. */ - MESOSPHERE_ASSERT(this->is_mapped); - this->is_mapped = false; + MESOSPHERE_ASSERT(m_is_mapped); + m_is_mapped = false; return ResultSuccess(); } @@ -116,13 +116,13 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Validate the size. */ - R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Ensure we're not already mapped. */ - R_UNLESS(!this->is_owner_mapped, svc::ResultInvalidState()); + R_UNLESS(!m_is_owner_mapped, svc::ResultInvalidState()); /* Convert the memory permission. */ KMemoryPermission k_perm; @@ -133,10 +133,10 @@ namespace ams::kern { } /* Map the memory. 
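[Reviewer note: every Map/Unmap entry point above repeats one pattern: validate the caller's size against the locked page group, take the object lock, and flip exactly one "mapped" flag on success. A condensed sketch with the Result plumbing reduced to a bool; the names are invented.]

    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    struct CodeMemory {
        std::mutex lock;            /* stands in for KLightLock */
        std::size_t num_pages = 4;
        bool is_mapped = false;

        bool Map(std::size_t size, std::size_t page_size = 0x1000) {
            if ((size + page_size - 1) / page_size != num_pages) {
                return false;       /* svc::ResultInvalidSize in the real code */
            }
            std::scoped_lock lk(lock);
            if (is_mapped) {
                return false;       /* svc::ResultInvalidState */
            }
            /* ...MapPageGroup would happen here... */
            is_mapped = true;
            return true;
        }
    };

    int main() {
        CodeMemory cm;
        std::printf("%d %d\n", cm.Map(0x4000), cm.Map(0x4000) && false);   /* 1 0 */
    }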
*/ - R_TRY(this->owner->GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode, k_perm)); + R_TRY(m_owner->GetPageTable().MapPageGroup(address, GetReference(m_page_group), KMemoryState_GeneratedCode, k_perm)); /* Mark ourselves as mapped. */ - this->is_owner_mapped = true; + m_is_owner_mapped = true; return ResultSuccess(); } @@ -145,17 +145,17 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Validate the size. */ - R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Unmap the memory. */ - R_TRY(this->owner->GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode)); + R_TRY(m_owner->GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), KMemoryState_GeneratedCode)); /* Mark ourselves as unmapped. */ - MESOSPHERE_ASSERT(this->is_owner_mapped); - this->is_owner_mapped = false; + MESOSPHERE_ASSERT(m_is_owner_mapped); + m_is_owner_mapped = false; return ResultSuccess(); } diff --git a/libraries/libmesosphere/source/kern_k_condition_variable.cpp b/libraries/libmesosphere/source/kern_k_condition_variable.cpp index e86aaad29..3234b87d9 100644 --- a/libraries/libmesosphere/source/kern_k_condition_variable.cpp +++ b/libraries/libmesosphere/source/kern_k_condition_variable.cpp @@ -178,8 +178,8 @@ namespace ams::kern { { KScopedSchedulerLock sl; - auto it = this->tree.nfind_light({ cv_key, -1 }); - while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetConditionVariableKey() == cv_key)) { + auto it = m_tree.nfind_light({ cv_key, -1 }); + while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetConditionVariableKey() == cv_key)) { KThread *target_thread = std::addressof(*it); if (KThread *thread = this->SignalImpl(target_thread); thread != nullptr) { @@ -190,13 +190,13 @@ namespace ams::kern { } } - it = this->tree.erase(it); + it = m_tree.erase(it); target_thread->ClearConditionVariable(); ++num_waiters; } /* If we have no waiters, clear the has waiter flag. */ - if (it == this->tree.end() || it->GetConditionVariableKey() != cv_key) { + if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) { const u32 has_waiter_flag = 0; WriteToUser(cv_key, std::addressof(has_waiter_flag)); } @@ -266,8 +266,8 @@ namespace ams::kern { /* Update condition variable tracking. */ { - cur_thread->SetConditionVariable(std::addressof(this->tree), addr, key, value); - this->tree.insert(*cur_thread); + cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value); + m_tree.insert(*cur_thread); } /* If the timeout is non-zero, set the thread as waiting. */ @@ -290,7 +290,7 @@ namespace ams::kern { } if (cur_thread->IsWaitingForConditionVariable()) { - this->tree.erase(this->tree.iterator_to(*cur_thread)); + m_tree.erase(m_tree.iterator_to(*cur_thread)); cur_thread->ClearConditionVariable(); } } diff --git a/libraries/libmesosphere/source/kern_k_debug_base.cpp b/libraries/libmesosphere/source/kern_k_debug_base.cpp index 8f96f9b10..c41c87f13 100644 --- a/libraries/libmesosphere/source/kern_k_debug_base.cpp +++ b/libraries/libmesosphere/source/kern_k_debug_base.cpp @@ -27,28 +27,28 @@ namespace ams::kern { void KDebugBase::Initialize() { /* Clear the process and continue flags. 
*/ - this->process = nullptr; - this->continue_flags = 0; + m_process = nullptr; + m_continue_flags = 0; } bool KDebugBase::Is64Bit() const { - MESOSPHERE_ASSERT(this->lock.IsLockedByCurrentThread()); - MESOSPHERE_ASSERT(this->process != nullptr); - return this->process->Is64Bit(); + MESOSPHERE_ASSERT(m_lock.IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(m_process != nullptr); + return m_process->Is64Bit(); } Result KDebugBase::QueryMemoryInfo(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, KProcessAddress address) { /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Check that we have a valid process. */ - R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated()); - R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated()); + R_UNLESS(m_process != nullptr, svc::ResultProcessTerminated()); + R_UNLESS(!m_process->IsTerminated(), svc::ResultProcessTerminated()); /* Query the mapping's info. */ KMemoryInfo info; - R_TRY(process->GetPageTable().QueryInfo(std::addressof(info), out_page_info, address)); + R_TRY(m_process->GetPageTable().QueryInfo(std::addressof(info), out_page_info, address)); /* Write output. */ *out_memory_info = info.GetSvcMemoryInfo(); @@ -57,15 +57,15 @@ namespace ams::kern { Result KDebugBase::ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size) { /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Check that we have a valid process. */ - R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated()); - R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated()); + R_UNLESS(m_process != nullptr, svc::ResultProcessTerminated()); + R_UNLESS(!m_process->IsTerminated(), svc::ResultProcessTerminated()); /* Get the page tables. */ KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable(); - KProcessPageTable &target_pt = this->process->GetPageTable(); + KProcessPageTable &target_pt = m_process->GetPageTable(); /* Verify that the regions are in range. */ R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory()); @@ -150,15 +150,15 @@ namespace ams::kern { Result KDebugBase::WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size) { /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Check that we have a valid process. */ - R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated()); - R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated()); + R_UNLESS(m_process != nullptr, svc::ResultProcessTerminated()); + R_UNLESS(!m_process->IsTerminated(), svc::ResultProcessTerminated()); /* Get the page tables. */ KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable(); - KProcessPageTable &target_pt = this->process->GetPageTable(); + KProcessPageTable &target_pt = m_process->GetPageTable(); /* Verify that the regions are in range. */ R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory()); @@ -280,7 +280,7 @@ namespace ams::kern { /* Lock both ourselves, the target process, and the scheduler. */ KScopedLightLock state_lk(target->GetStateLock()); KScopedLightLock list_lk(target->GetListLock()); - KScopedLightLock this_lk(this->lock); + KScopedLightLock this_lk(m_lock); KScopedSchedulerLock sl; /* Check that the process isn't already being debugged. */ @@ -305,19 +305,19 @@ namespace ams::kern { } /* Set our process member, and open a reference to the target. 
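[Reviewer note: every debug operation in this file opens with the same guard: take the debug object's lock, then fail fast unless a process is still attached and not terminated. A condensed sketch, again with the Result plumbing reduced to a bool.]

    #include <cstdio>
    #include <mutex>

    struct Process {
        bool terminated = false;
        bool IsTerminated() const { return terminated; }
    };

    struct DebugObject {
        std::mutex lock;            /* stands in for KLightLock */
        Process *process = nullptr;

        bool QuerySomething() {
            std::scoped_lock lk(lock);
            if (process == nullptr || process->IsTerminated()) {
                return false;       /* svc::ResultProcessTerminated in the real code */
            }
            /* ...operate on the target's page table... */
            return true;
        }
    };

    int main() {
        DebugObject d;
        std::printf("%d\n", d.QuerySomething());   /* 0: nothing attached */
    }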
*/ - this->process = target; - this->process->Open(); + m_process = target; + m_process->Open(); /* Set ourselves as the process's attached object. */ - this->old_process_state = this->process->SetDebugObject(this); + m_old_process_state = m_process->SetDebugObject(this); /* Send an event for our attaching to the process. */ this->PushDebugEvent(ams::svc::DebugEvent_CreateProcess); /* Send events for attaching to each thread in the process. */ { - auto end = this->process->GetThreadList().end(); - for (auto it = this->process->GetThreadList().begin(); it != end; ++it) { + auto end = m_process->GetThreadList().end(); + for (auto it = m_process->GetThreadList().begin(); it != end; ++it) { /* Request that we suspend the thread. */ it->RequestSuspend(KThread::SuspendType_Debug); @@ -333,7 +333,7 @@ namespace ams::kern { } /* Send the process's jit debug info, if relevant. */ - if (KEventInfo *jit_info = this->process->GetJitDebugInfo(); jit_info != nullptr) { + if (KEventInfo *jit_info = m_process->GetJitDebugInfo(); jit_info != nullptr) { this->EnqueueDebugEventInfo(jit_info); } @@ -356,12 +356,12 @@ namespace ams::kern { /* Lock both ourselves, the target process, and the scheduler. */ KScopedLightLock state_lk(target->GetStateLock()); KScopedLightLock list_lk(target->GetListLock()); - KScopedLightLock this_lk(this->lock); + KScopedLightLock this_lk(m_lock); KScopedSchedulerLock sl; /* Check that we're still attached to the process, and that it's not terminated. */ /* NOTE: Here Nintendo only checks that this->process is not nullptr. */ - R_UNLESS(this->process == target.GetPointerUnsafe(), svc::ResultProcessTerminated()); + R_UNLESS(m_process == target.GetPointerUnsafe(), svc::ResultProcessTerminated()); R_UNLESS(!target->IsTerminated(), svc::ResultProcessTerminated()); /* Get the currently active threads. */ @@ -418,12 +418,12 @@ namespace ams::kern { /* Lock both ourselves and the target process. */ KScopedLightLock state_lk(target->GetStateLock()); KScopedLightLock list_lk(target->GetListLock()); - KScopedLightLock this_lk(this->lock); + KScopedLightLock this_lk(m_lock); /* Check that we still have our process. */ - if (this->process != nullptr) { + if (m_process != nullptr) { /* Check that our process is the one we got earlier. */ - MESOSPHERE_ASSERT(this->process == target.GetPointerUnsafe()); + MESOSPHERE_ASSERT(m_process == target.GetPointerUnsafe()); /* Lock the scheduler. */ KScopedSchedulerLock sl; @@ -450,10 +450,10 @@ namespace ams::kern { /* Detach from the process. */ target->ClearDebugObject(new_state); - this->process = nullptr; + m_process = nullptr; /* Clear our continue flags. */ - this->continue_flags = 0; + m_continue_flags = 0; } } @@ -468,7 +468,7 @@ namespace ams::kern { Result KDebugBase::GetThreadContext(ams::svc::ThreadContext *out, u64 thread_id, u32 context_flags) { /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Get the thread from its id. */ KThread *thread = KThread::GetThreadFromId(thread_id); @@ -476,7 +476,7 @@ namespace ams::kern { ON_SCOPE_EXIT { thread->Close(); }; /* Verify that the thread is owned by our process. */ - R_UNLESS(this->process == thread->GetOwnerProcess(), svc::ResultInvalidId()); + R_UNLESS(m_process == thread->GetOwnerProcess(), svc::ResultInvalidId()); /* Verify that the thread isn't terminated. 
*/ R_UNLESS(thread->GetState() != KThread::ThreadState_Terminated, svc::ResultTerminationRequested()); @@ -515,7 +515,7 @@ Result KDebugBase::SetThreadContext(const ams::svc::ThreadContext &ctx, u64 thread_id, u32 context_flags) { /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Get the thread from its id. */ KThread *thread = KThread::GetThreadFromId(thread_id); @@ -523,7 +523,7 @@ ON_SCOPE_EXIT { thread->Close(); }; /* Verify that the thread is owned by our process. */ - R_UNLESS(this->process == thread->GetOwnerProcess(), svc::ResultInvalidId()); + R_UNLESS(m_process == thread->GetOwnerProcess(), svc::ResultInvalidId()); /* Verify that the thread isn't terminated. */ R_UNLESS(thread->GetState() != KThread::ThreadState_Terminated, svc::ResultTerminationRequested()); @@ -575,29 +575,29 @@ /* Lock both ourselves, the target process, and the scheduler. */ KScopedLightLock state_lk(target->GetStateLock()); KScopedLightLock list_lk(target->GetListLock()); - KScopedLightLock this_lk(this->lock); + KScopedLightLock this_lk(m_lock); KScopedSchedulerLock sl; /* Check that we're still attached to the process, and that it's not terminated. */ - R_UNLESS(this->process == target.GetPointerUnsafe(), svc::ResultProcessTerminated()); + R_UNLESS(m_process == target.GetPointerUnsafe(), svc::ResultProcessTerminated()); R_UNLESS(!target->IsTerminated(), svc::ResultProcessTerminated()); /* Check that we have no pending events. */ - R_UNLESS(this->event_info_list.empty(), svc::ResultBusy()); + R_UNLESS(m_event_info_list.empty(), svc::ResultBusy()); /* Clear the target's JIT debug info. */ target->ClearJitDebugInfo(); /* Set our continue flags. */ - this->continue_flags = flags; + m_continue_flags = flags; /* Iterate over threads, continuing them as we should. */ bool has_debug_break_thread = false; { /* Parse our flags. */ - const bool exception_handled = (this->continue_flags & ams::svc::ContinueFlag_ExceptionHandled) != 0; - const bool continue_all = (this->continue_flags & ams::svc::ContinueFlag_ContinueAll) != 0; - const bool continue_others = (this->continue_flags & ams::svc::ContinueFlag_ContinueOthers) != 0; + const bool exception_handled = (m_continue_flags & ams::svc::ContinueFlag_ExceptionHandled) != 0; + const bool continue_all = (m_continue_flags & ams::svc::ContinueFlag_ContinueAll) != 0; + const bool continue_others = (m_continue_flags & ams::svc::ContinueFlag_ContinueOthers) != 0; /* Update each thread. */ auto end = target->GetThreadList().end(); @@ -786,15 +786,15 @@ KScopedSchedulerLock sl; /* Push the event to the back of the list. */ - this->event_info_list.push_back(*info); + m_event_info_list.push_back(*info); } KScopedAutoObject<KProcess> KDebugBase::GetProcess() { /* Lock ourselves. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - return this->process; + return m_process; } template<typename T> requires (std::same_as<T, ams::svc::lp64::DebugEventInfo> || std::same_as<T, ams::svc::ilp32::DebugEventInfo>) @@ -809,11 +809,11 @@ KScopedSchedulerLock sl; /* Check that we have an event to dequeue. */ - R_UNLESS(!this->event_info_list.empty(), svc::ResultNoEvent()); + R_UNLESS(!m_event_info_list.empty(), svc::ResultNoEvent()); /* Pop the event from the front of the queue.
*/ - info = std::addressof(this->event_info_list.front()); - this->event_info_list.pop_front(); + info = std::addressof(m_event_info_list.front()); + m_event_info_list.pop_front(); } MESOSPHERE_ASSERT(info != nullptr); @@ -932,16 +932,16 @@ namespace ams::kern { /* Lock both ourselves and the target process. */ KScopedLightLock state_lk(process->GetStateLock()); KScopedLightLock list_lk(process->GetListLock()); - KScopedLightLock this_lk(this->lock); + KScopedLightLock this_lk(m_lock); /* Ensure we finalize exactly once. */ - if (this->process != nullptr) { - MESOSPHERE_ASSERT(this->process == process.GetPointerUnsafe()); + if (m_process != nullptr) { + MESOSPHERE_ASSERT(m_process == process.GetPointerUnsafe()); { KScopedSchedulerLock sl; /* Detach ourselves from the process. */ - process->ClearDebugObject(this->old_process_state); + process->ClearDebugObject(m_old_process_state); /* Release all threads. */ const bool resume = (process->GetState() != KProcess::State_Crashed); @@ -959,7 +959,7 @@ namespace ams::kern { } /* Clear our process. */ - this->process = nullptr; + m_process = nullptr; } } } @@ -970,9 +970,9 @@ namespace ams::kern { { KScopedSchedulerLock sl; - while (!this->event_info_list.empty()) { - KEventInfo *info = std::addressof(this->event_info_list.front()); - this->event_info_list.pop_front(); + while (!m_event_info_list.empty()) { + KEventInfo *info = std::addressof(m_event_info_list.front()); + m_event_info_list.pop_front(); KEventInfo::Free(info); } } @@ -981,7 +981,7 @@ namespace ams::kern { bool KDebugBase::IsSignaled() const { KScopedSchedulerLock sl; - return (!this->event_info_list.empty()) || this->process == nullptr || this->process->IsTerminated(); + return (!m_event_info_list.empty()) || m_process == nullptr || m_process->IsTerminated(); } Result KDebugBase::ProcessDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) { @@ -1007,7 +1007,7 @@ namespace ams::kern { R_SUCCEED_IF(debug == nullptr); /* If the event is an exception and we don't have exception events enabled, we can't handle the event. */ - if (event == ams::svc::DebugEvent_Exception && (debug->continue_flags & ams::svc::ContinueFlag_EnableExceptionEvent) == 0) { + if (event == ams::svc::DebugEvent_Exception && (debug->m_continue_flags & ams::svc::ContinueFlag_EnableExceptionEvent) == 0) { GetCurrentThread().SetDebugExceptionResult(ResultSuccess()); return svc::ResultNotHandled(); } diff --git a/libraries/libmesosphere/source/kern_k_device_address_space.cpp b/libraries/libmesosphere/source/kern_k_device_address_space.cpp index bfecb953a..f9d0d23cd 100644 --- a/libraries/libmesosphere/source/kern_k_device_address_space.cpp +++ b/libraries/libmesosphere/source/kern_k_device_address_space.cpp @@ -28,12 +28,12 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Initialize the device page table. */ - R_TRY(this->table.Initialize(address, size)); + R_TRY(m_table.Initialize(address, size)); /* Set member variables. */ - this->space_address = address; - this->space_size = size; - this->is_initialized = true; + m_space_address = address; + m_space_size = size; + m_is_initialized = true; return ResultSuccess(); } @@ -42,7 +42,7 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Finalize the table. */ - this->table.Finalize(); + m_table.Finalize(); /* Finalize base. 
*/ KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList>::Finalize(); @@ -50,26 +50,26 @@ Result KDeviceAddressSpace::Attach(ams::svc::DeviceName device_name) { /* Lock the address space. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Attach. */ - return this->table.Attach(device_name, this->space_address, this->space_size); + return m_table.Attach(device_name, m_space_address, m_space_size); } Result KDeviceAddressSpace::Detach(ams::svc::DeviceName device_name) { /* Lock the address space. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Detach. */ - return this->table.Detach(device_name); + return m_table.Detach(device_name); } Result KDeviceAddressSpace::Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings) { /* Check that the address falls within the space. */ - R_UNLESS((this->space_address <= device_address && device_address + size - 1 <= this->space_address + this->space_size - 1), svc::ResultInvalidCurrentMemory()); + R_UNLESS((m_space_address <= device_address && device_address + size - 1 <= m_space_address + m_space_size - 1), svc::ResultInvalidCurrentMemory()); /* Lock the address space. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Lock the pages. */ KPageGroup pg(page_table->GetBlockInfoManager()); @@ -87,11 +87,11 @@ auto mapped_size_guard = SCOPE_GUARD { *out_mapped_size = 0; }; /* Perform the mapping. */ - R_TRY(this->table.Map(out_mapped_size, pg, device_address, device_perm, refresh_mappings)); + R_TRY(m_table.Map(out_mapped_size, pg, device_address, device_perm, refresh_mappings)); /* Ensure that we unmap the pages if we fail to update the protections. */ /* NOTE: Nintendo does not check the result of this unmap call. */ - auto map_guard = SCOPE_GUARD { this->table.Unmap(device_address, *out_mapped_size); }; + auto map_guard = SCOPE_GUARD { m_table.Unmap(device_address, *out_mapped_size); }; /* Update the protections in accordance with how much we mapped. */ R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, *out_mapped_size)); @@ -108,10 +108,10 @@ Result KDeviceAddressSpace::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address) { /* Check that the address falls within the space. */ - R_UNLESS((this->space_address <= device_address && device_address + size - 1 <= this->space_address + this->space_size - 1), svc::ResultInvalidCurrentMemory()); + R_UNLESS((m_space_address <= device_address && device_address + size - 1 <= m_space_address + m_space_size - 1), svc::ResultInvalidCurrentMemory()); /* Lock the address space. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); /* Make and open a page group for the unmapped region. */ KPageGroup pg(page_table->GetBlockInfoManager()); @@ -125,7 +125,7 @@ auto unlock_guard = SCOPE_GUARD { page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, size); }; /* Unmap.
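Note the shape of the bounds check in Map/Unmap above: it compares last addresses (end - 1) on both sides rather than one-past-the-end addresses. A self-contained illustration of why that matters (names hypothetical):

    #include <cstdint>

    /* Comparing last addresses stays correct even when addr + size would
       wrap past the top of the address space. */
    constexpr bool ContainsSketch(std::uint64_t space_addr, std::uint64_t space_size,
                                  std::uint64_t addr, std::uint64_t size) {
        return space_addr <= addr && addr + size - 1 <= space_addr + space_size - 1;
    }

    /* A region ending exactly at the top of the space is accepted... */
    static_assert(ContainsSketch(0xFFFFFFFFFFFFE000ull, 0x2000, 0xFFFFFFFFFFFFF000ull, 0x1000));
    /* ...while a wrapping region is rejected; a naive addr + size <= space_end
       check would overflow to 0 and wrongly pass here. */
    static_assert(!ContainsSketch(0, 0x1000, 0xFFFFFFFFFFFFFFFFull, 1));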
*/ - R_TRY(this->table.Unmap(pg, device_address)); + R_TRY(m_table.Unmap(pg, device_address)); unlock_guard.Cancel(); } diff --git a/libraries/libmesosphere/source/kern_k_event.cpp b/libraries/libmesosphere/source/kern_k_event.cpp index 08dbb3b87..189e1be94 100644 --- a/libraries/libmesosphere/source/kern_k_event.cpp +++ b/libraries/libmesosphere/source/kern_k_event.cpp @@ -27,19 +27,19 @@ namespace ams::kern { this->Open(); /* Create our sub events. */ - KAutoObject::Create(std::addressof(this->readable_event)); - KAutoObject::Create(std::addressof(this->writable_event)); + KAutoObject::Create(std::addressof(m_readable_event)); + KAutoObject::Create(std::addressof(m_writable_event)); /* Initialize our sub sessions. */ - this->readable_event.Initialize(this); - this->writable_event.Initialize(this); + m_readable_event.Initialize(this); + m_writable_event.Initialize(this); /* Set our owner process. */ - this->owner = GetCurrentProcessPointer(); - this->owner->Open(); + m_owner = GetCurrentProcessPointer(); + m_owner->Open(); /* Mark initialized. */ - this->initialized = true; + m_initialized = true; } void KEvent::Finalize() { diff --git a/libraries/libmesosphere/source/kern_k_handle_table.cpp b/libraries/libmesosphere/source/kern_k_handle_table.cpp index ca71003a8..11e84ff50 100644 --- a/libraries/libmesosphere/source/kern_k_handle_table.cpp +++ b/libraries/libmesosphere/source/kern_k_handle_table.cpp @@ -25,10 +25,10 @@ namespace ams::kern { u16 saved_table_size = 0; { KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); - std::swap(this->table, saved_table); - std::swap(this->table_size, saved_table_size); + std::swap(m_table, saved_table); + std::swap(m_table_size, saved_table_size); } /* Close and free all entries. */ @@ -61,7 +61,7 @@ namespace ams::kern { KAutoObject *obj = nullptr; { KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); if (Entry *entry = this->FindEntry(handle); entry != nullptr) { obj = entry->GetObject(); @@ -79,10 +79,10 @@ namespace ams::kern { Result KHandleTable::Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type) { MESOSPHERE_ASSERT_THIS(); KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); /* Never exceed our capacity. */ - R_UNLESS(this->count < this->table_size, svc::ResultOutOfHandles()); + R_UNLESS(m_count < m_table_size, svc::ResultOutOfHandles()); /* Allocate entry, set output handle. */ { @@ -99,10 +99,10 @@ namespace ams::kern { Result KHandleTable::Reserve(ams::svc::Handle *out_handle) { MESOSPHERE_ASSERT_THIS(); KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); /* Never exceed our capacity. */ - R_UNLESS(this->count < this->table_size, svc::ResultOutOfHandles()); + R_UNLESS(m_count < m_table_size, svc::ResultOutOfHandles()); *out_handle = EncodeHandle(this->GetEntryIndex(this->AllocateEntry()), this->AllocateLinearId()); return ResultSuccess(); @@ -111,7 +111,7 @@ namespace ams::kern { void KHandleTable::Unreserve(ams::svc::Handle handle) { MESOSPHERE_ASSERT_THIS(); KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); /* Unpack the handle. 
*/ const auto handle_pack = GetHandleBitPack(handle); @@ -120,11 +120,11 @@ const auto reserved = handle_pack.Get<HandleReserved>(); MESOSPHERE_ASSERT(reserved == 0); MESOSPHERE_ASSERT(linear_id != 0); - MESOSPHERE_ASSERT(index < this->table_size); + MESOSPHERE_ASSERT(index < m_table_size); /* Free the entry. */ /* NOTE: This code does not check the linear id. */ - Entry *entry = std::addressof(this->table[index]); + Entry *entry = std::addressof(m_table[index]); MESOSPHERE_ASSERT(entry->GetObject() == nullptr); this->FreeEntry(entry); @@ -133,7 +133,7 @@ void KHandleTable::Register(ams::svc::Handle handle, KAutoObject *obj, u16 type) { MESOSPHERE_ASSERT_THIS(); KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); + KScopedSpinLock lk(m_lock); /* Unpack the handle. */ const auto handle_pack = GetHandleBitPack(handle); @@ -142,10 +142,10 @@ const auto reserved = handle_pack.Get<HandleReserved>(); MESOSPHERE_ASSERT(reserved == 0); MESOSPHERE_ASSERT(linear_id != 0); - MESOSPHERE_ASSERT(index < this->table_size); + MESOSPHERE_ASSERT(index < m_table_size); /* Set the entry. */ - Entry *entry = std::addressof(this->table[index]); + Entry *entry = std::addressof(m_table[index]); MESOSPHERE_ASSERT(entry->GetObject() == nullptr); entry->SetUsed(obj, linear_id, type); diff --git a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp index 7c9a53f49..6665dcdc8 100644 --- a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp +++ b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp @@ -77,14 +77,14 @@ Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const { /* Get and validate addresses/sizes. */ - const uintptr_t rx_address = this->kip_header->GetRxAddress(); - const size_t rx_size = this->kip_header->GetRxSize(); - const uintptr_t ro_address = this->kip_header->GetRoAddress(); - const size_t ro_size = this->kip_header->GetRoSize(); - const uintptr_t rw_address = this->kip_header->GetRwAddress(); - const size_t rw_size = this->kip_header->GetRwSize(); - const uintptr_t bss_address = this->kip_header->GetBssAddress(); - const size_t bss_size = this->kip_header->GetBssSize(); + const uintptr_t rx_address = m_kip_header->GetRxAddress(); + const size_t rx_size = m_kip_header->GetRxSize(); + const uintptr_t ro_address = m_kip_header->GetRoAddress(); + const size_t ro_size = m_kip_header->GetRoSize(); + const uintptr_t rw_address = m_kip_header->GetRwAddress(); + const size_t rw_size = m_kip_header->GetRwSize(); + const uintptr_t bss_address = m_kip_header->GetBssAddress(); + const size_t bss_size = m_kip_header->GetBssSize(); R_UNLESS(util::IsAligned(rx_address, PageSize), svc::ResultInvalidAddress()); R_UNLESS(util::IsAligned(ro_address, PageSize), svc::ResultInvalidAddress()); R_UNLESS(util::IsAligned(rw_address, PageSize), svc::ResultInvalidAddress()); @@ -115,13 +115,13 @@ /* Set fields in parameter.
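The kern_k_handle_table.cpp hunks above unpack handles through GetHandleBitPack into an index, a linear id, and reserved bits. A self-contained sketch of that style of packing (the field widths here are assumptions for illustration, not the kernel's authoritative layout):

    #include <cstdint>

    /* Pack a 15-bit table index and a 15-bit, nonzero linear id. */
    constexpr std::uint32_t EncodeHandleSketch(std::uint16_t index, std::uint16_t linear_id) {
        return (static_cast<std::uint32_t>(linear_id) << 15) | (index & 0x7FFF);
    }
    constexpr std::uint16_t HandleIndexSketch(std::uint32_t handle)    { return handle & 0x7FFF; }
    constexpr std::uint16_t HandleLinearIdSketch(std::uint32_t handle) { return (handle >> 15) & 0x7FFF; }

    static_assert(HandleIndexSketch(EncodeHandleSketch(42, 7)) == 42);
    static_assert(HandleLinearIdSketch(EncodeHandleSketch(42, 7)) == 7);

The nonzero linear id (asserted above) is presumably what lets a stale handle to a since-reused table slot be detected and rejected.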
*/ out->code_address = map_start + start_address; out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize; - out->program_id = this->kip_header->GetProgramId(); - out->version = this->kip_header->GetVersion(); + out->program_id = m_kip_header->GetProgramId(); + out->version = m_kip_header->GetVersion(); out->flags = 0; MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize)); /* Copy name field. */ - this->kip_header->GetName(out->name, sizeof(out->name)); + m_kip_header->GetName(out->name, sizeof(out->name)); /* Apply ASLR, if needed. */ if (enable_aslr) { @@ -151,34 +151,34 @@ std::memset(GetVoidPointer(address), 0, params.code_num_pages * PageSize); /* Prepare to layout the data. */ - const KProcessAddress rx_address = address + this->kip_header->GetRxAddress(); - const KProcessAddress ro_address = address + this->kip_header->GetRoAddress(); - const KProcessAddress rw_address = address + this->kip_header->GetRwAddress(); - const u8 *rx_binary = reinterpret_cast<const u8 *>(this->kip_header + 1); - const u8 *ro_binary = rx_binary + this->kip_header->GetRxCompressedSize(); - const u8 *rw_binary = ro_binary + this->kip_header->GetRoCompressedSize(); + const KProcessAddress rx_address = address + m_kip_header->GetRxAddress(); + const KProcessAddress ro_address = address + m_kip_header->GetRoAddress(); + const KProcessAddress rw_address = address + m_kip_header->GetRwAddress(); + const u8 *rx_binary = reinterpret_cast<const u8 *>(m_kip_header + 1); + const u8 *ro_binary = rx_binary + m_kip_header->GetRxCompressedSize(); + const u8 *rw_binary = ro_binary + m_kip_header->GetRoCompressedSize(); /* Copy text. */ - if (util::AlignUp(this->kip_header->GetRxSize(), PageSize)) { - std::memcpy(GetVoidPointer(rx_address), rx_binary, this->kip_header->GetRxCompressedSize()); - if (this->kip_header->IsRxCompressed()) { - BlzUncompress(GetVoidPointer(rx_address + this->kip_header->GetRxCompressedSize())); + if (util::AlignUp(m_kip_header->GetRxSize(), PageSize)) { + std::memcpy(GetVoidPointer(rx_address), rx_binary, m_kip_header->GetRxCompressedSize()); + if (m_kip_header->IsRxCompressed()) { + BlzUncompress(GetVoidPointer(rx_address + m_kip_header->GetRxCompressedSize())); } } /* Copy rodata. */ - if (util::AlignUp(this->kip_header->GetRoSize(), PageSize)) { - std::memcpy(GetVoidPointer(ro_address), ro_binary, this->kip_header->GetRoCompressedSize()); - if (this->kip_header->IsRoCompressed()) { - BlzUncompress(GetVoidPointer(ro_address + this->kip_header->GetRoCompressedSize())); + if (util::AlignUp(m_kip_header->GetRoSize(), PageSize)) { + std::memcpy(GetVoidPointer(ro_address), ro_binary, m_kip_header->GetRoCompressedSize()); + if (m_kip_header->IsRoCompressed()) { + BlzUncompress(GetVoidPointer(ro_address + m_kip_header->GetRoCompressedSize())); } } /* Copy rwdata.
*/ - if (util::AlignUp(this->kip_header->GetRwSize(), PageSize)) { - std::memcpy(GetVoidPointer(rw_address), rw_binary, this->kip_header->GetRwCompressedSize()); - if (this->kip_header->IsRwCompressed()) { - BlzUncompress(GetVoidPointer(rw_address + this->kip_header->GetRwCompressedSize())); + if (util::AlignUp(m_kip_header->GetRwSize(), PageSize)) { + std::memcpy(GetVoidPointer(rw_address), rw_binary, m_kip_header->GetRwCompressedSize()); + if (m_kip_header->IsRwCompressed()) { + BlzUncompress(GetVoidPointer(rw_address + m_kip_header->GetRwCompressedSize())); } } @@ -192,27 +192,27 @@ } Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const { - const size_t rx_size = this->kip_header->GetRxSize(); - const size_t ro_size = this->kip_header->GetRoSize(); - const size_t rw_size = this->kip_header->GetRwSize(); - const size_t bss_size = this->kip_header->GetBssSize(); + const size_t rx_size = m_kip_header->GetRxSize(); + const size_t ro_size = m_kip_header->GetRoSize(); + const size_t rw_size = m_kip_header->GetRwSize(); + const size_t bss_size = m_kip_header->GetBssSize(); /* Set R-X pages. */ if (rx_size) { - const uintptr_t start = this->kip_header->GetRxAddress() + params.code_address; + const uintptr_t start = m_kip_header->GetRxAddress() + params.code_address; R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(rx_size, PageSize), ams::svc::MemoryPermission_ReadExecute)); } /* Set R-- pages. */ if (ro_size) { - const uintptr_t start = this->kip_header->GetRoAddress() + params.code_address; + const uintptr_t start = m_kip_header->GetRoAddress() + params.code_address; R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(ro_size, PageSize), ams::svc::MemoryPermission_Read)); } /* Set RW- pages. */ if (rw_size || bss_size) { - const uintptr_t start = (rw_size ? this->kip_header->GetRwAddress() : this->kip_header->GetBssAddress()) + params.code_address; - const uintptr_t end = (bss_size ? this->kip_header->GetBssAddress() + bss_size : this->kip_header->GetRwAddress() + rw_size) + params.code_address; + const uintptr_t start = (rw_size ? m_kip_header->GetRwAddress() : m_kip_header->GetBssAddress()) + params.code_address; + const uintptr_t end = (bss_size ? m_kip_header->GetBssAddress() + bss_size : m_kip_header->GetRwAddress() + rw_size) + params.code_address; R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(end - start, PageSize), ams::svc::MemoryPermission_ReadWrite)); } diff --git a/libraries/libmesosphere/source/kern_k_interrupt_event.cpp b/libraries/libmesosphere/source/kern_k_interrupt_event.cpp index 304f319f7..08c46dd71 100644 --- a/libraries/libmesosphere/source/kern_k_interrupt_event.cpp +++ b/libraries/libmesosphere/source/kern_k_interrupt_event.cpp @@ -28,23 +28,23 @@ MESOSPHERE_ASSERT_THIS(); /* Set interrupt id. */ - this->interrupt_id = interrupt_name; + m_interrupt_id = interrupt_name; /* Initialize readable event base. */ KReadableEvent::Initialize(nullptr); /* Try to register the task. */ - R_TRY(KInterruptEventTask::Register(this->interrupt_id, type == ams::svc::InterruptType_Level, this)); + R_TRY(KInterruptEventTask::Register(m_interrupt_id, type == ams::svc::InterruptType_Level, this)); /* Mark initialized.
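The RW-page branch of SetMemoryPermissions above has to cover .rwdata and .bss with one span even though either segment may be empty. Distilled into a standalone sketch (types and names illustrative only):

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    struct SegmentSketch { std::uintptr_t address; std::size_t size; };

    /* Start at rwdata if present (else bss); end at bss if present (else rwdata). */
    constexpr std::pair<std::uintptr_t, std::uintptr_t>
    WritableSpanSketch(SegmentSketch rw, SegmentSketch bss) {
        const std::uintptr_t start = rw.size ? rw.address : bss.address;
        const std::uintptr_t end   = bss.size ? bss.address + bss.size : rw.address + rw.size;
        return {start, end};
    }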
*/ - this->is_initialized = true; + m_is_initialized = true; return ResultSuccess(); } void KInterruptEvent::Finalize() { MESOSPHERE_ASSERT_THIS(); - g_interrupt_event_task_table[this->interrupt_id]->Unregister(this->interrupt_id); + g_interrupt_event_task_table[m_interrupt_id]->Unregister(m_interrupt_id); /* Perform inherited finalization. */ KAutoObjectWithSlabHeapAndContainer<KInterruptEvent, KReadableEvent>::Finalize(); @@ -54,13 +54,13 @@ MESOSPHERE_ASSERT_THIS(); /* Lock the task. */ - KScopedLightLock lk(g_interrupt_event_task_table[this->interrupt_id]->GetLock()); + KScopedLightLock lk(g_interrupt_event_task_table[m_interrupt_id]->GetLock()); /* Clear the event. */ R_TRY(KReadableEvent::Reset()); /* Clear the interrupt. */ - Kernel::GetInterruptManager().ClearInterrupt(this->interrupt_id); + Kernel::GetInterruptManager().ClearInterrupt(m_interrupt_id); return ResultSuccess(); } @@ -78,7 +78,7 @@ KInterruptEventTask *task = g_interrupt_event_task_table[interrupt_id]; if (task != nullptr) { /* Check that there's not already an event for this task. */ - R_UNLESS(task->event == nullptr, svc::ResultBusy()); + R_UNLESS(task->m_event == nullptr, svc::ResultBusy()); } else { /* Allocate a new task. */ task = KInterruptEventTask::Allocate(); @@ -93,13 +93,13 @@ /* Register/bind the interrupt task. */ { /* Acquire exclusive access to the task. */ - KScopedLightLock tlk(task->lock); + KScopedLightLock tlk(task->m_lock); /* Bind the interrupt handler. */ R_TRY(Kernel::GetInterruptManager().BindHandler(task, interrupt_id, GetCurrentCoreId(), KInterruptController::PriorityLevel_High, true, level)); /* Set the event. */ - task->event = event; + task->m_event = event; } /* If we allocated, set the event in the table. */ @@ -119,14 +119,14 @@ KScopedLightLock lk(g_interrupt_event_lock); /* Lock the task. */ - KScopedLightLock tlk(this->lock); + KScopedLightLock tlk(m_lock); /* Ensure we can unregister. */ MESOSPHERE_ABORT_UNLESS(g_interrupt_event_task_table[interrupt_id] == this); - MESOSPHERE_ABORT_UNLESS(this->event != nullptr); + MESOSPHERE_ABORT_UNLESS(m_event != nullptr); /* Unbind the interrupt. */ - this->event = nullptr; + m_event = nullptr; Kernel::GetInterruptManager().UnbindHandler(interrupt_id, GetCurrentCoreId()); } @@ -140,10 +140,10 @@ MESOSPHERE_ASSERT_THIS(); /* Lock the task table. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - if (this->event != nullptr) { - this->event->Signal(); + if (m_event != nullptr) { + m_event->Signal(); } } } diff --git a/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp index d553feb8f..4c318ca88 100644 --- a/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp @@ -18,18 +18,18 @@ namespace ams::kern { void KInterruptTaskManager::TaskQueue::Enqueue(KInterruptTask *task) { - MESOSPHERE_ASSERT(task != this->head); - MESOSPHERE_ASSERT(task != this->tail); + MESOSPHERE_ASSERT(task != m_head); + MESOSPHERE_ASSERT(task != m_tail); MESOSPHERE_AUDIT(task->GetNextTask() == nullptr); /* Insert the task into the queue. */ - if (this->tail != nullptr) { - this->tail->SetNextTask(task); + if (m_tail != nullptr) { + m_tail->SetNextTask(task); } else { - this->head = task; + m_head = task; } - this->tail = task; + m_tail = task; /* Set the next task for auditing.
*/ #if defined (MESOSPHERE_BUILD_FOR_AUDITING) @@ -38,18 +38,18 @@ } void KInterruptTaskManager::TaskQueue::Dequeue() { - MESOSPHERE_ASSERT(this->head != nullptr); - MESOSPHERE_ASSERT(this->tail != nullptr); - MESOSPHERE_AUDIT(this->tail->GetNextTask() == GetDummyInterruptTask()); + MESOSPHERE_ASSERT(m_head != nullptr); + MESOSPHERE_ASSERT(m_tail != nullptr); + MESOSPHERE_AUDIT(m_tail->GetNextTask() == GetDummyInterruptTask()); /* Pop the task from the front of the queue. */ - KInterruptTask *old_head = this->head; - if (this->head == this->tail) { - this->head = nullptr; - this->tail = nullptr; + KInterruptTask *old_head = m_head; + if (m_head == m_tail) { + m_head = nullptr; + m_tail = nullptr; } else { - this->head = this->head->GetNextTask(); + m_head = m_head->GetNextTask(); } #if defined (MESOSPHERE_BUILD_FOR_AUDITING) @@ -72,13 +72,13 @@ { KScopedInterruptDisable di; - task = this->task_queue.GetHead(); + task = m_task_queue.GetHead(); if (task == nullptr) { - this->thread->SetState(KThread::ThreadState_Waiting); + m_thread->SetState(KThread::ThreadState_Waiting); continue; } - this->task_queue.Dequeue(); + m_task_queue.Dequeue(); } /* Do the task. */ @@ -91,20 +91,20 @@ MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1)); /* Create and initialize the thread. */ - this->thread = KThread::Create(); - MESOSPHERE_ABORT_UNLESS(this->thread != nullptr); - MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeHighPriorityThread(this->thread, ThreadFunction, reinterpret_cast<uintptr_t>(this))); - KThread::Register(this->thread); + m_thread = KThread::Create(); + MESOSPHERE_ABORT_UNLESS(m_thread != nullptr); + MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeHighPriorityThread(m_thread, ThreadFunction, reinterpret_cast<uintptr_t>(this))); + KThread::Register(m_thread); /* Run the thread. */ - this->thread->Run(); + m_thread->Run(); } void KInterruptTaskManager::EnqueueTask(KInterruptTask *task) { MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); /* Enqueue the task and signal the scheduler. */ - this->task_queue.Enqueue(task); + m_task_queue.Enqueue(task); Kernel::GetScheduler().SetInterruptTaskRunnable(); } diff --git a/libraries/libmesosphere/source/kern_k_light_client_session.cpp b/libraries/libmesosphere/source/kern_k_light_client_session.cpp index deacc8ee0..7ebd2fe86 100644 --- a/libraries/libmesosphere/source/kern_k_light_client_session.cpp +++ b/libraries/libmesosphere/source/kern_k_light_client_session.cpp @@ -20,7 +20,7 @@ namespace ams::kern { void KLightClientSession::Destroy() { MESOSPHERE_ASSERT_THIS(); - this->parent->OnClientClosed(); + m_parent->OnClientClosed(); } void KLightClientSession::OnServerClosed() { @@ -42,7 +42,7 @@ cur_thread->SetSyncedObject(nullptr, ResultSuccess()); - R_TRY(this->parent->OnRequest(cur_thread)); + R_TRY(m_parent->OnRequest(cur_thread)); } /* Get the result. */ diff --git a/libraries/libmesosphere/source/kern_k_light_lock.cpp b/libraries/libmesosphere/source/kern_k_light_lock.cpp index a085e1ce7..d7c423092 100644 --- a/libraries/libmesosphere/source/kern_k_light_lock.cpp +++ b/libraries/libmesosphere/source/kern_k_light_lock.cpp @@ -25,13 +25,13 @@ KScopedSchedulerLock sl; /* Ensure we actually have locking to do.
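Stripped of the kernel assertions and auditing hooks, the TaskQueue Enqueue/Dequeue hunks above implement a plain intrusive singly linked queue: tasks carry their own next pointer, so queue operations never allocate. A minimal standalone sketch:

    struct TaskSketch {
        TaskSketch *next = nullptr;
    };

    struct TaskQueueSketch {
        TaskSketch *head = nullptr;
        TaskSketch *tail = nullptr;

        void Enqueue(TaskSketch *task) {
            if (tail != nullptr) {
                tail->next = task; /* append after the current tail */
            } else {
                head = task;       /* queue was empty */
            }
            tail = task;
        }

        TaskSketch *Dequeue() {
            TaskSketch *old_head = head;
            if (head == tail) {
                head = tail = nullptr; /* removed the last element */
            } else {
                head = head->next;
            }
            return old_head;
        }
    };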
*/ - if (AMS_UNLIKELY(this->tag.load(std::memory_order_relaxed) != _owner)) { + if (AMS_UNLIKELY(m_tag.load(std::memory_order_relaxed) != _owner)) { return; } /* Add the current thread as a waiter on the owner. */ KThread *owner_thread = reinterpret_cast<KThread *>(_owner & ~1ul); - cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(this->tag))); + cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag))); owner_thread->AddWaiter(cur_thread); /* Set thread states. */ @@ -66,7 +66,7 @@ /* Get the next owner. */ s32 num_waiters = 0; - KThread *next_owner = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(this->tag))); + KThread *next_owner = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag))); /* Pass the lock to the next owner. */ uintptr_t next_tag = 0; @@ -93,7 +93,7 @@ } /* Write the new tag value. */ - this->tag.store(next_tag); + m_tag.store(next_tag); } } diff --git a/libraries/libmesosphere/source/kern_k_light_server_session.cpp b/libraries/libmesosphere/source/kern_k_light_server_session.cpp index 766b35f90..2bad75f70 100644 --- a/libraries/libmesosphere/source/kern_k_light_server_session.cpp +++ b/libraries/libmesosphere/source/kern_k_light_server_session.cpp @@ -22,7 +22,7 @@ this->CleanupRequests(); - this->parent->OnServerClosed(); + m_parent->OnServerClosed(); } void KLightServerSession::OnClientClosed() { @@ -36,14 +36,14 @@ MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); /* Check that the server isn't closed. */ - R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed()); + R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed()); /* Try to sleep the thread. */ - R_UNLESS(this->request_queue.SleepThread(request_thread), svc::ResultTerminationRequested()); + R_UNLESS(m_request_queue.SleepThread(request_thread), svc::ResultTerminationRequested()); /* If we don't have a current request, wake up a server thread to handle it. */ - if (this->current_request == nullptr) { - this->server_queue.WakeupFrontThread(); + if (m_current_request == nullptr) { + m_server_queue.WakeupFrontThread(); } return ResultSuccess(); } @@ -62,27 +62,27 @@ KScopedSchedulerLock sl; /* Check that we're open. */ - R_UNLESS(!this->parent->IsClientClosed(), svc::ResultSessionClosed()); - R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed()); + R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed()); + R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed()); /* Check that we have a request to reply to. */ - R_UNLESS(this->current_request != nullptr, svc::ResultInvalidState()); + R_UNLESS(m_current_request != nullptr, svc::ResultInvalidState()); /* Check that the server thread is correct. */ - R_UNLESS(this->server_thread == server_thread, svc::ResultInvalidState()); + R_UNLESS(m_server_thread == server_thread, svc::ResultInvalidState()); /* If we can reply, do so.
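In the KLightLock hunks above, the tag is an owner-thread pointer with its low bit borrowed as a flag (`_owner & ~1ul` strips it before the pointer is used). A hedged standalone sketch of that encoding (the exact flag semantics are an assumption here):

    #include <cstdint>

    /* Bit 0 of the tag marks contention; thread objects are aligned, so the
       bit is otherwise unused in the pointer. */
    constexpr std::uintptr_t MakeTagSketch(std::uintptr_t owner, bool contended) {
        return owner | static_cast<std::uintptr_t>(contended);
    }
    constexpr std::uintptr_t TagOwnerSketch(std::uintptr_t tag) { return tag & ~static_cast<std::uintptr_t>(1); }
    constexpr bool TagContendedSketch(std::uintptr_t tag)       { return (tag & 1) != 0; }

    static_assert(TagOwnerSketch(MakeTagSketch(0x1000, true)) == 0x1000);
    static_assert(TagContendedSketch(MakeTagSketch(0x1000, true)));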
*/ - if (!this->current_request->IsTerminationRequested()) { - MESOSPHERE_ASSERT(this->current_request->GetState() == KThread::ThreadState_Waiting); - MESOSPHERE_ASSERT(this->request_queue.begin() != this->request_queue.end() && this->current_request == std::addressof(*this->request_queue.begin())); - std::memcpy(this->current_request->GetLightSessionData(), server_thread->GetLightSessionData(), KLightSession::DataSize); - this->request_queue.WakeupThread(this->current_request); + if (!m_current_request->IsTerminationRequested()) { + MESOSPHERE_ASSERT(m_current_request->GetState() == KThread::ThreadState_Waiting); + MESOSPHERE_ASSERT(m_request_queue.begin() != m_request_queue.end() && m_current_request == std::addressof(*m_request_queue.begin())); + std::memcpy(m_current_request->GetLightSessionData(), server_thread->GetLightSessionData(), KLightSession::DataSize); + m_request_queue.WakeupThread(m_current_request); } /* Clear our current request. */ - cur_request = this->current_request; - this->current_request = nullptr; - this->server_thread = nullptr; + cur_request = m_current_request; + m_current_request = nullptr; + m_server_thread = nullptr; } /* Close the current request, if we had one. */ @@ -96,8 +96,8 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Check that we aren't already receiving. */ - R_UNLESS(this->server_queue.IsEmpty(), svc::ResultInvalidState()); - R_UNLESS(this->server_thread == nullptr, svc::ResultInvalidState()); + R_UNLESS(m_server_queue.IsEmpty(), svc::ResultInvalidState()); + R_UNLESS(m_server_thread == nullptr, svc::ResultInvalidState()); /* If we cancelled in a previous loop, clear cancel state. */ if (set_cancellable) { @@ -106,22 +106,22 @@ namespace ams::kern { } /* Check that we're open. */ - R_UNLESS(!this->parent->IsClientClosed(), svc::ResultSessionClosed()); - R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed()); + R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed()); + R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed()); /* If we have a request available, use it. */ - if (this->current_request == nullptr && !this->request_queue.IsEmpty()) { - this->current_request = std::addressof(*this->request_queue.begin()); - this->current_request->Open(); - this->server_thread = server_thread; + if (m_current_request == nullptr && !m_request_queue.IsEmpty()) { + m_current_request = std::addressof(*m_request_queue.begin()); + m_current_request->Open(); + m_server_thread = server_thread; break; } else { /* Otherwise, wait for a request to come in. */ - R_UNLESS(this->server_queue.SleepThread(server_thread), svc::ResultTerminationRequested()); + R_UNLESS(m_server_queue.SleepThread(server_thread), svc::ResultTerminationRequested()); /* Check if we were cancelled. */ if (server_thread->IsWaitCancelled()) { - this->server_queue.WakeupThread(server_thread); + m_server_queue.WakeupThread(server_thread); server_thread->ClearWaitCancelled(); return svc::ResultCancelled(); } @@ -133,7 +133,7 @@ namespace ams::kern { } /* Copy the client data. */ - std::memcpy(server_thread->GetLightSessionData(), this->current_request->GetLightSessionData(), KLightSession::DataSize); + std::memcpy(server_thread->GetLightSessionData(), m_current_request->GetLightSessionData(), KLightSession::DataSize); return ResultSuccess(); } @@ -144,30 +144,30 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Handle the current request. */ - if (this->current_request != nullptr) { + if (m_current_request != nullptr) { /* Reply to the current request. 
*/ - if (!this->current_request->IsTerminationRequested()) { - MESOSPHERE_ASSERT(this->current_request->GetState() == KThread::ThreadState_Waiting); - MESOSPHERE_ASSERT(this->request_queue.begin() != this->request_queue.end() && this->current_request == std::addressof(*this->request_queue.begin())); - this->request_queue.WakeupThread(this->current_request); - this->current_request->SetSyncedObject(nullptr, svc::ResultSessionClosed()); + if (!m_current_request->IsTerminationRequested()) { + MESOSPHERE_ASSERT(m_current_request->GetState() == KThread::ThreadState_Waiting); + MESOSPHERE_ASSERT(m_request_queue.begin() != m_request_queue.end() && m_current_request == std::addressof(*m_request_queue.begin())); + m_request_queue.WakeupThread(m_current_request); + m_current_request->SetSyncedObject(nullptr, svc::ResultSessionClosed()); } /* Clear our current request. */ - cur_request = this->current_request; - this->current_request = nullptr; - this->server_thread = nullptr; + cur_request = m_current_request; + m_current_request = nullptr; + m_server_thread = nullptr; } /* Reply to all other requests. */ - while (!this->request_queue.IsEmpty()) { - KThread *client_thread = this->request_queue.WakeupFrontThread(); + while (!m_request_queue.IsEmpty()) { + KThread *client_thread = m_request_queue.WakeupFrontThread(); client_thread->SetSyncedObject(nullptr, svc::ResultSessionClosed()); } /* Wake up all server threads. */ - while (!this->server_queue.IsEmpty()) { - this->server_queue.WakeupFrontThread(); + while (!m_server_queue.IsEmpty()) { + m_server_queue.WakeupFrontThread(); } } diff --git a/libraries/libmesosphere/source/kern_k_light_session.cpp b/libraries/libmesosphere/source/kern_k_light_session.cpp index 6132d09b7..b4ed79510 100644 --- a/libraries/libmesosphere/source/kern_k_light_session.cpp +++ b/libraries/libmesosphere/source/kern_k_light_session.cpp @@ -27,44 +27,44 @@ namespace ams::kern { this->Open(); /* Create our sub sessions. */ - KAutoObject::Create(std::addressof(this->server)); - KAutoObject::Create(std::addressof(this->client)); + KAutoObject::Create(std::addressof(m_server)); + KAutoObject::Create(std::addressof(m_client)); /* Initialize our sub sessions. */ - this->server.Initialize(this); - this->client.Initialize(this); + m_server.Initialize(this); + m_client.Initialize(this); /* Set state and name. */ - this->state = State::Normal; - this->name = name; + m_state = State::Normal; + m_name = name; /* Set our owner process. */ - this->process = GetCurrentProcessPointer(); - this->process->Open(); + m_process = GetCurrentProcessPointer(); + m_process->Open(); /* Set our port. */ - this->port = client_port; - if (this->port != nullptr) { - this->port->Open(); + m_port = client_port; + if (m_port != nullptr) { + m_port->Open(); } /* Mark initialized. 
*/ - this->initialized = true; + m_initialized = true; } void KLightSession::Finalize() { - if (this->port != nullptr) { - this->port->OnSessionFinalized(); - this->port->Close(); + if (m_port != nullptr) { + m_port->OnSessionFinalized(); + m_port->Close(); } } void KLightSession::OnServerClosed() { MESOSPHERE_ASSERT_THIS(); - if (this->state == State::Normal) { - this->state = State::ServerClosed; - this->client.OnServerClosed(); + if (m_state == State::Normal) { + m_state = State::ServerClosed; + m_client.OnServerClosed(); } this->Close(); @@ -73,9 +73,9 @@ namespace ams::kern { void KLightSession::OnClientClosed() { MESOSPHERE_ASSERT_THIS(); - if (this->state == State::Normal) { - this->state = State::ClientClosed; - this->server.OnClientClosed(); + if (m_state == State::Normal) { + m_state = State::ClientClosed; + m_server.OnClientClosed(); } this->Close(); diff --git a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp index 2a45306ad..e4b2c371d 100644 --- a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp @@ -53,10 +53,10 @@ namespace ams::kern { } constexpr const char *GetMemoryPermissionString(const KMemoryInfo &info) { - if (info.state == KMemoryState_Free) { + if (info.m_state == KMemoryState_Free) { return " "; } else { - switch (info.perm) { + switch (info.m_perm) { case KMemoryPermission_UserReadExecute: return "r-x"; case KMemoryPermission_UserRead: @@ -70,18 +70,18 @@ namespace ams::kern { } void DumpMemoryInfo(const KMemoryInfo &info) { - const char *state = GetMemoryStateName(info.state); + const char *state = GetMemoryStateName(info.m_state); const char *perm = GetMemoryPermissionString(info); const uintptr_t start = info.GetAddress(); const uintptr_t end = info.GetLastAddress(); const size_t kb = info.GetSize() / 1_KB; - const char l = (info.attribute & KMemoryAttribute_Locked) ? 'L' : '-'; - const char i = (info.attribute & KMemoryAttribute_IpcLocked) ? 'I' : '-'; - const char d = (info.attribute & KMemoryAttribute_DeviceShared) ? 'D' : '-'; - const char u = (info.attribute & KMemoryAttribute_Uncached) ? 'U' : '-'; + const char l = (info.m_attribute & KMemoryAttribute_Locked) ? 'L' : '-'; + const char i = (info.m_attribute & KMemoryAttribute_IpcLocked) ? 'I' : '-'; + const char d = (info.m_attribute & KMemoryAttribute_DeviceShared) ? 'D' : '-'; + const char u = (info.m_attribute & KMemoryAttribute_Uncached) ? 'U' : '-'; - MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, info.ipc_lock_count, info.device_use_count); + MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, info.m_ipc_lock_count, info.m_device_use_count); } } @@ -92,40 +92,40 @@ namespace ams::kern { R_UNLESS(start_block != nullptr, svc::ResultOutOfResource()); /* Set our start and end. */ - this->start_address = st; - this->end_address = nd; - MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->start_address), PageSize)); - MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->end_address), PageSize)); + m_start_address = st; + m_end_address = nd; + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(m_start_address), PageSize)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(m_end_address), PageSize)); /* Initialize and insert the block. 
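The KLightSession OnServerClosed/OnClientClosed pair above amounts to a small close protocol: whichever endpoint closes first moves the session out of Normal and notifies its peer, while the second close finds the state already changed and only drops its reference. A distilled sketch (notification bodies elided):

    enum class SessionStateSketch { Normal, ServerClosed, ClientClosed };

    struct SessionSketch {
        SessionStateSketch state = SessionStateSketch::Normal;

        void OnServerClosed() {
            if (state == SessionStateSketch::Normal) {
                state = SessionStateSketch::ServerClosed;
                /* ...tell the client endpoint its peer is gone... */
            }
        }

        void OnClientClosed() {
            if (state == SessionStateSketch::Normal) {
                state = SessionStateSketch::ClientClosed;
                /* ...tell the server endpoint its peer is gone... */
            }
        }
    };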
*/ - start_block->Initialize(this->start_address, (this->end_address - this->start_address) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None); - this->memory_block_tree.insert(*start_block); + start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None); + m_memory_block_tree.insert(*start_block); return ResultSuccess(); } void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager *slab_manager) { /* Erase every block until we have none left. */ - auto it = this->memory_block_tree.begin(); - while (it != this->memory_block_tree.end()) { + auto it = m_memory_block_tree.begin(); + while (it != m_memory_block_tree.end()) { KMemoryBlock *block = std::addressof(*it); - it = this->memory_block_tree.erase(it); + it = m_memory_block_tree.erase(it); slab_manager->Free(block); } - MESOSPHERE_ASSERT(this->memory_block_tree.empty()); + MESOSPHERE_ASSERT(m_memory_block_tree.empty()); } KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const { if (num_pages > 0) { const KProcessAddress region_end = region_start + region_num_pages * PageSize; const KProcessAddress region_last = region_end - 1; - for (const_iterator it = this->FindIterator(region_start); it != this->memory_block_tree.cend(); it++) { + for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); it++) { const KMemoryInfo info = it->GetMemoryInfo(); if (region_last < info.GetAddress()) { break; } - if (info.state != KMemoryState_Free) { + if (info.m_state != KMemoryState_Free) { continue; } @@ -150,20 +150,20 @@ namespace ams::kern { void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages) { /* Find the iterator now that we've updated. */ iterator it = this->FindIterator(address); - if (address != this->start_address) { + if (address != m_start_address) { it--; } /* Coalesce blocks that we can. 
*/ while (true) { iterator prev = it++; - if (it == this->memory_block_tree.end()) { + if (it == m_memory_block_tree.end()) { break; } if (prev->CanMergeWith(*it)) { KMemoryBlock *block = std::addressof(*it); - this->memory_block_tree.erase(it); + m_memory_block_tree.erase(it); prev->Add(*block); allocator->Free(block); it = prev; @@ -203,7 +203,7 @@ namespace ams::kern { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address); - it = this->memory_block_tree.insert(*new_block); + it = m_memory_block_tree.insert(*new_block); it++; cur_info = it->GetMemoryInfo(); @@ -215,7 +215,7 @@ namespace ams::kern { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address + remaining_size); - it = this->memory_block_tree.insert(*new_block); + it = m_memory_block_tree.insert(*new_block); cur_info = it->GetMemoryInfo(); } @@ -250,7 +250,7 @@ namespace ams::kern { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address); - it = this->memory_block_tree.insert(*new_block); + it = m_memory_block_tree.insert(*new_block); it++; cur_info = it->GetMemoryInfo(); @@ -262,7 +262,7 @@ namespace ams::kern { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address + remaining_size); - it = this->memory_block_tree.insert(*new_block); + it = m_memory_block_tree.insert(*new_block); cur_info = it->GetMemoryInfo(); } @@ -303,11 +303,11 @@ namespace ams::kern { KMemoryInfo cur_info = it->GetMemoryInfo(); /* If we need to, create a new block before and insert it. */ - if (cur_info.address != GetInteger(cur_address)) { + if (cur_info.m_address != GetInteger(cur_address)) { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address); - it = this->memory_block_tree.insert(*new_block); + it = m_memory_block_tree.insert(*new_block); it++; cur_info = it->GetMemoryInfo(); @@ -319,7 +319,7 @@ namespace ams::kern { KMemoryBlock *new_block = allocator->Allocate(); it->Split(new_block, cur_address + remaining_size); - it = this->memory_block_tree.insert(*new_block); + it = m_memory_block_tree.insert(*new_block); cur_info = it->GetMemoryInfo(); } @@ -340,9 +340,9 @@ namespace ams::kern { auto dump_guard = SCOPE_GUARD { this->DumpBlocks(); }; /* Loop over every block, ensuring that we are sorted and coalesced. */ - auto it = this->memory_block_tree.cbegin(); + auto it = m_memory_block_tree.cbegin(); auto prev = it++; - while (it != this->memory_block_tree.cend()) { + while (it != m_memory_block_tree.cend()) { const KMemoryInfo prev_info = prev->GetMemoryInfo(); const KMemoryInfo cur_info = it->GetMemoryInfo(); @@ -357,12 +357,12 @@ namespace ams::kern { } /* If the block is ipc locked, it must have a count. */ - if ((cur_info.attribute & KMemoryAttribute_IpcLocked) != 0 && cur_info.ipc_lock_count == 0) { + if ((cur_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && cur_info.m_ipc_lock_count == 0) { return false; } /* If the block is device shared, it must have a count. */ - if ((cur_info.attribute & KMemoryAttribute_DeviceShared) != 0 && cur_info.device_use_count == 0) { + if ((cur_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && cur_info.m_device_use_count == 0) { return false; } @@ -371,15 +371,15 @@ namespace ams::kern { } /* Our loop will miss checking the last block, potentially, so check it. */ - if (prev != this->memory_block_tree.cend()) { + if (prev != m_memory_block_tree.cend()) { const KMemoryInfo prev_info = prev->GetMemoryInfo(); /* If the block is ipc locked, it must have a count. 
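CoalesceForUpdate above merges a block with its successor whenever CanMergeWith holds. The merge condition reduces to "contiguous and identical state", roughly as follows (a hedged sketch; the real KMemoryBlock compares the full state/permission/attribute tuple):

    #include <cstddef>
    #include <cstdint>

    struct BlockSketch {
        static constexpr std::size_t PageSizeSketch = 0x1000; /* illustrative */

        std::uintptr_t address;
        std::size_t    num_pages;
        int            state; /* stand-in for state/permission/attribute */

        constexpr bool CanMergeWith(const BlockSketch &rhs) const {
            return address + num_pages * PageSizeSketch == rhs.address
                && state == rhs.state;
        }

        constexpr void Add(const BlockSketch &rhs) {
            num_pages += rhs.num_pages; /* absorb the contiguous neighbor */
        }
    };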
*/ - if ((prev_info.attribute & KMemoryAttribute_IpcLocked) != 0 && prev_info.ipc_lock_count == 0) { + if ((prev_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && prev_info.m_ipc_lock_count == 0) { return false; } /* If the block is device shared, it must have a count. */ - if ((prev_info.attribute & KMemoryAttribute_DeviceShared) != 0 && prev_info.device_use_count == 0) { + if ((prev_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && prev_info.m_device_use_count == 0) { return false; } } @@ -391,7 +391,7 @@ namespace ams::kern { void KMemoryBlockManager::DumpBlocks() const { /* Dump each block. */ - for (const auto &block : this->memory_block_tree) { + for (const auto &block : m_memory_block_tree) { DumpMemoryInfo(block.GetMemoryInfo()); } } diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp index 6ff7acb0c..7143f062c 100644 --- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp @@ -41,7 +41,7 @@ namespace ams::kern { std::memset(GetVoidPointer(management_region), 0, management_region_size); /* Traverse the virtual memory layout tree, initializing each manager as appropriate. */ - while (this->num_managers != MaxManagerCount) { + while (m_num_managers != MaxManagerCount) { /* Locate the region that should initialize the current manager. */ uintptr_t region_address = 0; size_t region_size = 0; @@ -53,7 +53,7 @@ namespace ams::kern { } /* We want to initialize the managers in order. */ - if (it.GetAttributes() != this->num_managers) { + if (it.GetAttributes() != m_num_managers) { continue; } @@ -82,21 +82,21 @@ namespace ams::kern { } /* Initialize a new manager for the region. */ - Impl *manager = std::addressof(this->managers[this->num_managers++]); - MESOSPHERE_ABORT_UNLESS(this->num_managers <= util::size(this->managers)); + Impl *manager = std::addressof(m_managers[m_num_managers++]); + MESOSPHERE_ABORT_UNLESS(m_num_managers <= util::size(m_managers)); const size_t cur_size = manager->Initialize(region_address, region_size, management_region, management_region_end, region_pool); management_region += cur_size; MESOSPHERE_ABORT_UNLESS(management_region <= management_region_end); /* Insert the manager into the pool list. */ - if (this->pool_managers_tail[region_pool] == nullptr) { - this->pool_managers_head[region_pool] = manager; + if (m_pool_managers_tail[region_pool] == nullptr) { + m_pool_managers_head[region_pool] = manager; } else { - this->pool_managers_tail[region_pool]->SetNext(manager); - manager->SetPrev(this->pool_managers_tail[region_pool]); + m_pool_managers_tail[region_pool]->SetNext(manager); + manager->SetPrev(m_pool_managers_tail[region_pool]); } - this->pool_managers_tail[region_pool] = manager; + m_pool_managers_tail[region_pool] = manager; } /* Free each region to its corresponding heap. */ @@ -106,26 +106,26 @@ namespace ams::kern { MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0); /* Free the memory to the heap. */ - this->managers[it.GetAttributes()].Free(it.GetAddress(), it.GetSize() / PageSize); + m_managers[it.GetAttributes()].Free(it.GetAddress(), it.GetSize() / PageSize); } } /* Update the used size for all managers. */ - for (size_t i = 0; i < this->num_managers; ++i) { - this->managers[i].UpdateUsedHeapSize(); + for (size_t i = 0; i < m_num_managers; ++i) { + m_managers[i].UpdateUsedHeapSize(); } } Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { /* Lock the pool. 
*/ - KScopedLightLock lk(this->pool_locks[pool]); + KScopedLightLock lk(m_pool_locks[pool]); /* Check that we don't already have an optimized process. */ - R_UNLESS(!this->has_optimized_process[pool], svc::ResultBusy()); + R_UNLESS(!m_has_optimized_process[pool], svc::ResultBusy()); /* Set the optimized process id. */ - this->optimized_process_ids[pool] = process_id; - this->has_optimized_process[pool] = true; + m_optimized_process_ids[pool] = process_id; + m_has_optimized_process[pool] = true; /* Clear the management area for the optimized process. */ for (auto *manager = this->GetFirstManager(pool, Direction_FromFront); manager != nullptr; manager = this->GetNextManager(manager, Direction_FromFront)) { @@ -137,11 +137,11 @@ namespace ams::kern { void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) { /* Lock the pool. */ - KScopedLightLock lk(this->pool_locks[pool]); + KScopedLightLock lk(m_pool_locks[pool]); /* If the process was optimized, clear it. */ - if (this->has_optimized_process[pool] && this->optimized_process_ids[pool] == process_id) { - this->has_optimized_process[pool] = false; + if (m_has_optimized_process[pool] && m_optimized_process_ids[pool] == process_id) { + m_has_optimized_process[pool] = false; } } @@ -154,7 +154,7 @@ namespace ams::kern { /* Lock the pool that we're allocating from. */ const auto [pool, dir] = DecodeOption(option); - KScopedLightLock lk(this->pool_locks[pool]); + KScopedLightLock lk(m_pool_locks[pool]); /* Choose a heap based on our page size request. */ const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); @@ -181,7 +181,7 @@ namespace ams::kern { } /* Maintain the optimized memory bitmap, if we should. */ - if (this->has_optimized_process[pool]) { + if (m_has_optimized_process[pool]) { chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages); } @@ -251,10 +251,10 @@ namespace ams::kern { /* Lock the pool that we're allocating from. */ const auto [pool, dir] = DecodeOption(option); - KScopedLightLock lk(this->pool_locks[pool]); + KScopedLightLock lk(m_pool_locks[pool]); /* Allocate the page group. */ - R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, this->has_optimized_process[pool], true)); + R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true)); /* Open the first reference to the pages. */ for (const auto &block : *out) { @@ -288,11 +288,11 @@ namespace ams::kern { bool optimized; { /* Lock the pool that we're allocating from. */ - KScopedLightLock lk(this->pool_locks[pool]); + KScopedLightLock lk(m_pool_locks[pool]); /* Check if we have an optimized process. */ - const bool has_optimized = this->has_optimized_process[pool]; - const bool is_optimized = this->optimized_process_ids[pool] == process_id; + const bool has_optimized = m_has_optimized_process[pool]; + const bool is_optimized = m_optimized_process_ids[pool] == process_id; /* Allocate the page group. */ R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false)); @@ -361,7 +361,7 @@ namespace ams::kern { auto &manager = this->GetManager(cur_address); /* Lock the pool for the manager. */ - KScopedLightLock lk(this->pool_locks[manager.GetPool()]); + KScopedLightLock lk(m_pool_locks[manager.GetPool()]); /* Track some or all of the current pages. 
*/ const size_t cur_pages = std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); @@ -395,13 +395,13 @@ MESOSPHERE_ABORT_UNLESS(util::IsAligned(total_management_size, PageSize)); /* Setup region. */ - this->pool = p; - this->management_region = management; - this->page_reference_counts = GetPointer<RefCount>(management + optimize_map_size); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(this->management_region), PageSize)); + m_pool = p; + m_management_region = management; + m_page_reference_counts = GetPointer<RefCount>(management + optimize_map_size); + MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(m_management_region), PageSize)); /* Initialize the manager's KPageHeap. */ - this->heap.Initialize(address, size, management + manager_size, page_heap_size); + m_heap.Initialize(address, size, management + manager_size, page_heap_size); return total_management_size; } @@ -412,7 +412,7 @@ const size_t last = offset + num_pages - 1; /* Track. */ - u64 *optimize_map = GetPointer<u64>(this->management_region); + u64 *optimize_map = GetPointer<u64>(m_management_region); while (offset <= last) { /* Mark the page as not being optimized-allocated. */ optimize_map[offset / BITSIZEOF(u64)] &= ~(u64(1) << (offset % BITSIZEOF(u64))); @@ -427,7 +427,7 @@ const size_t last = offset + num_pages - 1; /* Track. */ - u64 *optimize_map = GetPointer<u64>(this->management_region); + u64 *optimize_map = GetPointer<u64>(m_management_region); while (offset <= last) { /* Mark the page as being optimized-allocated. */ optimize_map[offset / BITSIZEOF(u64)] |= (u64(1) << (offset % BITSIZEOF(u64))); @@ -445,7 +445,7 @@ const size_t last = offset + num_pages - 1; /* Process. */ - u64 *optimize_map = GetPointer<u64>(this->management_region); + u64 *optimize_map = GetPointer<u64>(m_management_region); while (offset <= last) { /* Check if the page has been optimized-allocated before. */ if ((optimize_map[offset / BITSIZEOF(u64)] & (u64(1) << (offset % BITSIZEOF(u64)))) == 0) { @@ -453,7 +453,7 @@ any_new = true; /* Fill the page. */ - std::memset(GetVoidPointer(this->heap.GetAddress() + offset * PageSize), fill_pattern, PageSize); + std::memset(GetVoidPointer(m_heap.GetAddress() + offset * PageSize), fill_pattern, PageSize); } offset++; diff --git a/libraries/libmesosphere/source/kern_k_object_name.cpp b/libraries/libmesosphere/source/kern_k_object_name.cpp index b5fd505a4..00ee3b5a4 100644 --- a/libraries/libmesosphere/source/kern_k_object_name.cpp +++ b/libraries/libmesosphere/source/kern_k_object_name.cpp @@ -26,16 +26,16 @@ void KObjectName::Initialize(KAutoObject *obj, const char *name) { /* Set member variables. */ - this->object = obj; - std::strncpy(this->name, name, sizeof(this->name)); - this->name[sizeof(this->name) - 1] = '\x00'; + m_object = obj; + std::strncpy(m_name, name, sizeof(m_name)); + m_name[sizeof(m_name) - 1] = '\x00'; /* Open a reference to the object we hold.
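The optimize_map loops above keep one bit per page of the managed region. The bit arithmetic, extracted into a standalone form (hypothetical names):

    #include <cstddef>
    #include <cstdint>

    /* One u64 word covers 64 pages; page N lives at word N/64, bit N%64. */
    inline void MarkOptimizedSketch(std::uint64_t *map, std::size_t page) {
        map[page / 64] |= (std::uint64_t(1) << (page % 64));
    }
    inline void ClearOptimizedSketch(std::uint64_t *map, std::size_t page) {
        map[page / 64] &= ~(std::uint64_t(1) << (page % 64));
    }
    inline bool WasOptimizedSketch(const std::uint64_t *map, std::size_t page) {
        return (map[page / 64] & (std::uint64_t(1) << (page % 64))) != 0;
    }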
*/ - this->object->Open(); + m_object->Open(); } bool KObjectName::MatchesName(const char *name) const { - return std::strncmp(this->name, name, sizeof(this->name)) == 0; + return std::strncmp(m_name, name, sizeof(m_name)) == 0; } Result KObjectName::NewFromName(KAutoObject *obj, const char *name) { diff --git a/libraries/libmesosphere/source/kern_k_page_group.cpp b/libraries/libmesosphere/source/kern_k_page_group.cpp index 1340064c9..14eed0eec 100644 --- a/libraries/libmesosphere/source/kern_k_page_group.cpp +++ b/libraries/libmesosphere/source/kern_k_page_group.cpp @@ -18,11 +18,11 @@ namespace ams::kern { void KPageGroup::Finalize() { - auto it = this->block_list.begin(); - while (it != this->block_list.end()) { + auto it = m_block_list.begin(); + while (it != m_block_list.end()) { KBlockInfo *info = std::addressof(*it); - it = this->block_list.erase(it); - this->manager->Free(info); + it = m_block_list.erase(it); + m_manager->Free(info); } } @@ -44,18 +44,18 @@ MESOSPHERE_ASSERT(addr < addr + num_pages * PageSize); /* Try to just append to the last block. */ - if (!this->block_list.empty()) { - auto it = --(this->block_list.end()); + if (!m_block_list.empty()) { + auto it = --(m_block_list.end()); R_SUCCEED_IF(it->TryConcatenate(addr, num_pages)); } /* Allocate a new block. */ - KBlockInfo *new_block = this->manager->Allocate(); + KBlockInfo *new_block = m_manager->Allocate(); R_UNLESS(new_block != nullptr, svc::ResultOutOfResource()); /* Initialize the block. */ new_block->Initialize(addr, num_pages); - this->block_list.push_back(*new_block); + m_block_list.push_back(*new_block); return ResultSuccess(); } @@ -77,10 +77,10 @@ } bool KPageGroup::IsEquivalentTo(const KPageGroup &rhs) const { - auto lit = this->block_list.cbegin(); - auto rit = rhs.block_list.cbegin(); - auto lend = this->block_list.cend(); - auto rend = rhs.block_list.cend(); + auto lit = m_block_list.cbegin(); + auto rit = rhs.m_block_list.cbegin(); + auto lend = m_block_list.cend(); + auto rend = rhs.m_block_list.cend(); while (lit != lend && rit != rend) { if (*lit != *rit) { diff --git a/libraries/libmesosphere/source/kern_k_page_heap.cpp b/libraries/libmesosphere/source/kern_k_page_heap.cpp index 428a238e2..ee55bb01d 100644 --- a/libraries/libmesosphere/source/kern_k_page_heap.cpp +++ b/libraries/libmesosphere/source/kern_k_page_heap.cpp @@ -25,16 +25,16 @@ const KVirtualAddress management_end = management_address + management_size; /* Set our members. */ - this->heap_address = address; - this->heap_size = size; - this->num_blocks = num_block_shifts; + m_heap_address = address; + m_heap_size = size; + m_num_blocks = num_block_shifts; /* Setup bitmaps. */ u64 *cur_bitmap_storage = GetPointer<u64>(management_address); for (size_t i = 0; i < num_block_shifts; i++) { const size_t cur_block_shift = block_shifts[i]; const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0; - cur_bitmap_storage = this->blocks[i].Initialize(this->heap_address, this->heap_size, cur_block_shift, next_block_shift, cur_bitmap_storage); + cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift, next_block_shift, cur_bitmap_storage); } /* Ensure we didn't overextend our bounds. 
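   (Aside: each Block::Initialize call above consumes part of one shared management
   buffer and returns a pointer to the first unused word, so the per-block-size bitmaps
   end up packed back to back; the bounds check that follows verifies the final cursor
   stayed within management_end. A minimal sketch of the pattern, with hypothetical names:

       #include <cstddef>
       #include <cstdint>

       // Carve one zero-initialized bitmap out of a shared storage buffer and
       // return the cursor where the next consumer's bitmap begins.
       std::uint64_t *InitializeBitmap(std::uint64_t *storage, std::size_t num_bits) {
           const std::size_t num_words = (num_bits + 63) / 64;
           for (std::size_t i = 0; i < num_words; ++i) {
               storage[i] = 0;
           }
           return storage + num_words;
       }

   Chaining the returned cursor through successive calls is what lets a single
   precomputed allocation back every block size.)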
*/ @@ -44,19 +44,19 @@ size_t KPageHeap::GetNumFreePages() const { size_t num_free = 0; - for (size_t i = 0; i < this->num_blocks; i++) { - num_free += this->blocks[i].GetNumFreePages(); + for (size_t i = 0; i < m_num_blocks; i++) { + num_free += m_blocks[i].GetNumFreePages(); } return num_free; } KVirtualAddress KPageHeap::AllocateBlock(s32 index, bool random) { - const size_t needed_size = this->blocks[index].GetSize(); + const size_t needed_size = m_blocks[index].GetSize(); - for (s32 i = index; i < static_cast<s32>(this->num_blocks); i++) { - if (const KVirtualAddress addr = this->blocks[i].PopBlock(random); addr != Null<KVirtualAddress>) { - if (const size_t allocated_size = this->blocks[i].GetSize(); allocated_size > needed_size) { + for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { + if (const KVirtualAddress addr = m_blocks[i].PopBlock(random); addr != Null<KVirtualAddress>) { + if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); } return addr; @@ -68,7 +68,7 @@ void KPageHeap::FreeBlock(KVirtualAddress block, s32 index) { do { - block = this->blocks[index++].PushBlock(block); + block = m_blocks[index++].PushBlock(block); } while (block != Null<KVirtualAddress>); } @@ -79,7 +79,7 @@ } /* Find the largest block size that we can free, and free as many as possible. */ - s32 big_index = static_cast<s32>(this->num_blocks) - 1; + s32 big_index = static_cast<s32>(m_num_blocks) - 1; const KVirtualAddress start = addr; const KVirtualAddress end = addr + num_pages * PageSize; KVirtualAddress before_start = start; @@ -87,7 +87,7 @@ KVirtualAddress after_start = end; KVirtualAddress after_end = end; while (big_index >= 0) { - const size_t block_size = this->blocks[big_index].GetSize(); + const size_t block_size = m_blocks[big_index].GetSize(); const KVirtualAddress big_start = util::AlignUp(GetInteger(start), block_size); const KVirtualAddress big_end = util::AlignDown(GetInteger(end), block_size); if (big_start < big_end) { @@ -105,7 +105,7 @@ /* Free space before the big blocks. */ for (s32 i = big_index - 1; i >= 0; i--) { - const size_t block_size = this->blocks[i].GetSize(); + const size_t block_size = m_blocks[i].GetSize(); while (before_start + block_size <= before_end) { before_end -= block_size; this->FreeBlock(before_end, i); @@ -114,7 +114,7 @@ /* Free space after the big blocks. 
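   (Aside: the loop above found the largest block size whose alignment fits inside
   [start, end); everything between the aligned big_start/big_end was freed as big
   blocks, and the unaligned remainders before and after are now peeled off with
   progressively smaller blocks. The core arithmetic, as a minimal sketch:

       #include <cstdint>

       // Align the range inward to a power-of-two block size; the middle
       // [big_start, big_end) can be freed in large blocks, while the leftovers
       // before and after need smaller ones. A valid middle run exists iff
       // big_start < big_end.
       constexpr void SplitRange(std::uintptr_t start, std::uintptr_t end,
                                 std::uintptr_t block_size,
                                 std::uintptr_t &big_start, std::uintptr_t &big_end) {
           big_start = (start + block_size - 1) & ~(block_size - 1); // AlignUp
           big_end   = end & ~(block_size - 1);                      // AlignDown
       }

   Each leftover is then freed at the largest block size that still fits inside it.)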
*/ for (s32 i = big_index - 1; i >= 0; i--) { - const size_t block_size = this->blocks[i].GetSize(); + const size_t block_size = m_blocks[i].GetSize(); while (after_start + block_size <= after_end) { this->FreeBlock(after_start, i); after_start += block_size; @@ -135,8 +135,8 @@ namespace ams::kern { void KPageHeap::DumpFreeList() const { MESOSPHERE_RELEASE_LOG("KPageHeap::DumpFreeList %p\n", this); - for (size_t i = 0; i < this->num_blocks; ++i) { - const size_t block_size = this->blocks[i].GetSize(); + for (size_t i = 0; i < m_num_blocks; ++i) { + const size_t block_size = m_blocks[i].GetSize(); const char *suffix; size_t size; if (block_size >= 1_GB) { @@ -153,7 +153,7 @@ namespace ams::kern { size = block_size; } - MESOSPHERE_RELEASE_LOG(" %4zu %s block x %zu\n", size, suffix, this->blocks[i].GetNumFreeBlocks()); + MESOSPHERE_RELEASE_LOG(" %4zu %s block x %zu\n", size, suffix, m_blocks[i].GetNumFreeBlocks()); } } diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index b7805d94f..6b7cbebca 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -16,51 +16,54 @@ #include #include +#undef ALWAYS_INLINE_LAMBDA +#define ALWAYS_INLINE_LAMBDA + namespace ams::kern { Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) { /* Initialize our members. */ - this->address_space_width = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32); - this->address_space_start = KProcessAddress(GetInteger(start)); - this->address_space_end = KProcessAddress(GetInteger(end)); - this->is_kernel = true; - this->enable_aslr = true; - this->enable_device_address_space_merge = false; + m_address_space_width = (is_64_bit) ? 
BITSIZEOF(u64) : BITSIZEOF(u32); + m_address_space_start = KProcessAddress(GetInteger(start)); + m_address_space_end = KProcessAddress(GetInteger(end)); + m_is_kernel = true; + m_enable_aslr = true; + m_enable_device_address_space_merge = false; - this->heap_region_start = 0; - this->heap_region_end = 0; - this->current_heap_end = 0; - this->alias_region_start = 0; - this->alias_region_end = 0; - this->stack_region_start = 0; - this->stack_region_end = 0; - this->kernel_map_region_start = 0; - this->kernel_map_region_end = 0; - this->alias_code_region_start = 0; - this->alias_code_region_end = 0; - this->code_region_start = 0; - this->code_region_end = 0; - this->max_heap_size = 0; - this->mapped_physical_memory_size = 0; - this->mapped_unsafe_physical_memory = 0; + m_heap_region_start = 0; + m_heap_region_end = 0; + m_current_heap_end = 0; + m_alias_region_start = 0; + m_alias_region_end = 0; + m_stack_region_start = 0; + m_stack_region_end = 0; + m_kernel_map_region_start = 0; + m_kernel_map_region_end = 0; + m_alias_code_region_start = 0; + m_alias_code_region_end = 0; + m_code_region_start = 0; + m_code_region_end = 0; + m_max_heap_size = 0; + m_mapped_physical_memory_size = 0; + m_mapped_unsafe_physical_memory = 0; - this->memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager()); - this->block_info_manager = std::addressof(Kernel::GetBlockInfoManager()); + m_memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager()); + m_block_info_manager = std::addressof(Kernel::GetBlockInfoManager()); - this->allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront); - this->heap_fill_value = MemoryFillValue_Zero; - this->ipc_fill_value = MemoryFillValue_Zero; - this->stack_fill_value = MemoryFillValue_Zero; + m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront); + m_heap_fill_value = MemoryFillValue_Zero; + m_ipc_fill_value = MemoryFillValue_Zero; + m_stack_fill_value = MemoryFillValue_Zero; - this->cached_physical_linear_region = nullptr; - this->cached_physical_heap_region = nullptr; - this->cached_virtual_heap_region = nullptr; + m_cached_physical_linear_region = nullptr; + m_cached_physical_heap_region = nullptr; + m_cached_virtual_heap_region = nullptr; /* Initialize our implementation. */ - this->impl.InitializeForKernel(table, start, end); + m_impl.InitializeForKernel(table, start, end); /* Initialize our memory block manager. */ - return this->memory_block_manager.Initialize(this->address_space_start, this->address_space_end, this->memory_block_slab_manager); + return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager); return ResultSuccess(); } @@ -75,14 +78,14 @@ namespace ams::kern { /* Define helpers. */ auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA { - return KAddressSpaceInfo::GetAddressSpaceStart(this->address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); }; auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA { - return KAddressSpaceInfo::GetAddressSpaceSize(this->address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); }; /* Set our width and heap/alias sizes. 
*/ - this->address_space_width = GetAddressSpaceWidth(as_type); + m_address_space_width = GetAddressSpaceWidth(as_type); size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias); size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap); @@ -97,47 +100,47 @@ namespace ams::kern { KProcessAddress process_code_end; size_t stack_region_size; size_t kernel_map_region_size; - if (this->address_space_width == 39) { + if (m_address_space_width == 39) { alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias); heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap); stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Stack); kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_MapSmall); - this->code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit); - this->code_region_end = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit); - this->alias_code_region_start = this->code_region_start; - this->alias_code_region_end = this->code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit); + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = m_code_region_end; process_code_start = util::AlignDown(GetInteger(code_address), RegionAlignment); process_code_end = util::AlignUp(GetInteger(code_address) + code_size, RegionAlignment); } else { stack_region_size = 0; kernel_map_region_size = 0; - this->code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall); - this->code_region_end = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall); - this->stack_region_start = this->code_region_start; - this->alias_code_region_start = this->code_region_start; - this->alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge); - this->stack_region_end = this->code_region_end; - this->kernel_map_region_start = this->code_region_start; - this->kernel_map_region_end = this->code_region_end; - process_code_start = this->code_region_start; - process_code_end = this->code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall); + m_stack_region_start = m_code_region_start; + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge); + m_stack_region_end = m_code_region_end; + m_kernel_map_region_start = m_code_region_start; + m_kernel_map_region_end = m_code_region_end; + process_code_start = m_code_region_start; + process_code_end = m_code_region_end; } /* Set other basic fields. */ - this->enable_aslr = enable_aslr; - this->enable_device_address_space_merge = enable_das_merge; - this->address_space_start = start; - this->address_space_end = end; - this->is_kernel = false; - this->memory_block_slab_manager = mem_block_slab_manager; - this->block_info_manager = block_info_manager; + m_enable_aslr = enable_aslr; + m_enable_device_address_space_merge = enable_das_merge; + m_address_space_start = start; + m_address_space_end = end; + m_is_kernel = false; + m_memory_block_slab_manager = mem_block_slab_manager; + m_block_info_manager = block_info_manager; /* Determine the region we can place our undetermineds in. 
*/ KProcessAddress alloc_start; size_t alloc_size; - if ((GetInteger(process_code_start) - GetInteger(this->code_region_start)) >= (GetInteger(end) - GetInteger(process_code_end))) { - alloc_start = this->code_region_start; - alloc_size = GetInteger(process_code_start) - GetInteger(this->code_region_start); + if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >= (GetInteger(end) - GetInteger(process_code_end))) { + alloc_start = m_code_region_start; + alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start); } else { alloc_start = process_code_end; alloc_size = GetInteger(end) - GetInteger(process_code_end); @@ -157,107 +160,107 @@ namespace ams::kern { } /* Setup heap and alias regions. */ - this->alias_region_start = alloc_start + alias_rnd; - this->alias_region_end = this->alias_region_start + alias_region_size; - this->heap_region_start = alloc_start + heap_rnd; - this->heap_region_end = this->heap_region_start + heap_region_size; + m_alias_region_start = alloc_start + alias_rnd; + m_alias_region_end = m_alias_region_start + alias_region_size; + m_heap_region_start = alloc_start + heap_rnd; + m_heap_region_end = m_heap_region_start + heap_region_size; if (alias_rnd <= heap_rnd) { - this->heap_region_start += alias_region_size; - this->heap_region_end += alias_region_size; + m_heap_region_start += alias_region_size; + m_heap_region_end += alias_region_size; } else { - this->alias_region_start += heap_region_size; - this->alias_region_end += heap_region_size; + m_alias_region_start += heap_region_size; + m_alias_region_end += heap_region_size; } /* Setup stack region. */ if (stack_region_size) { - this->stack_region_start = alloc_start + stack_rnd; - this->stack_region_end = this->stack_region_start + stack_region_size; + m_stack_region_start = alloc_start + stack_rnd; + m_stack_region_end = m_stack_region_start + stack_region_size; if (alias_rnd < stack_rnd) { - this->stack_region_start += alias_region_size; - this->stack_region_end += alias_region_size; + m_stack_region_start += alias_region_size; + m_stack_region_end += alias_region_size; } else { - this->alias_region_start += stack_region_size; - this->alias_region_end += stack_region_size; + m_alias_region_start += stack_region_size; + m_alias_region_end += stack_region_size; } if (heap_rnd < stack_rnd) { - this->stack_region_start += heap_region_size; - this->stack_region_end += heap_region_size; + m_stack_region_start += heap_region_size; + m_stack_region_end += heap_region_size; } else { - this->heap_region_start += stack_region_size; - this->heap_region_end += stack_region_size; + m_heap_region_start += stack_region_size; + m_heap_region_end += stack_region_size; } } /* Setup kernel map region. 
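   (Aside: the heap/alias/stack placement above, and the kernel-map placement below,
   all follow one rule: every region draws an independent random offset inside the
   allocatable window, and for each pair the region that drew the larger offset is
   displaced upward by the other's size. That keeps the random placements while
   guaranteeing the ranges cannot overlap. A minimal sketch of the pairwise rule,
   with hypothetical names:

       #include <cstddef>

       // a and b both start at window_base plus their random offsets; push the
       // one that landed higher past the other so the ranges stay disjoint.
       void Disentangle(std::size_t &a_start, std::size_t a_size, std::size_t a_rnd,
                        std::size_t &b_start, std::size_t b_size, std::size_t b_rnd) {
           if (a_rnd <= b_rnd) {
               b_start += a_size;
           } else {
               a_start += b_size;
           }
       }

   In the real code both the start and end of each region are adjusted, and the rule
   is applied once per pair of regions.)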
*/ if (kernel_map_region_size) { - this->kernel_map_region_start = alloc_start + kmap_rnd; - this->kernel_map_region_end = this->kernel_map_region_start + kernel_map_region_size; + m_kernel_map_region_start = alloc_start + kmap_rnd; + m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; if (alias_rnd < kmap_rnd) { - this->kernel_map_region_start += alias_region_size; - this->kernel_map_region_end += alias_region_size; + m_kernel_map_region_start += alias_region_size; + m_kernel_map_region_end += alias_region_size; } else { - this->alias_region_start += kernel_map_region_size; - this->alias_region_end += kernel_map_region_size; + m_alias_region_start += kernel_map_region_size; + m_alias_region_end += kernel_map_region_size; } if (heap_rnd < kmap_rnd) { - this->kernel_map_region_start += heap_region_size; - this->kernel_map_region_end += heap_region_size; + m_kernel_map_region_start += heap_region_size; + m_kernel_map_region_end += heap_region_size; } else { - this->heap_region_start += kernel_map_region_size; - this->heap_region_end += kernel_map_region_size; + m_heap_region_start += kernel_map_region_size; + m_heap_region_end += kernel_map_region_size; } if (stack_region_size) { if (stack_rnd < kmap_rnd) { - this->kernel_map_region_start += stack_region_size; - this->kernel_map_region_end += stack_region_size; + m_kernel_map_region_start += stack_region_size; + m_kernel_map_region_end += stack_region_size; } else { - this->stack_region_start += kernel_map_region_size; - this->stack_region_end += kernel_map_region_size; + m_stack_region_start += kernel_map_region_size; + m_stack_region_end += kernel_map_region_size; } } } /* Set heap and fill members. */ - this->current_heap_end = this->heap_region_start; - this->max_heap_size = 0; - this->mapped_physical_memory_size = 0; - this->mapped_unsafe_physical_memory = 0; + m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_mapped_physical_memory_size = 0; + m_mapped_unsafe_physical_memory = 0; const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled(); - this->heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero; - this->ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero; - this->stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero; + m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero; + m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero; + m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero; /* Set allocation option. */ - this->allocate_option = KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction_FromBack : KMemoryManager::Direction_FromFront); + m_allocate_option = KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction_FromBack : KMemoryManager::Direction_FromFront); /* Ensure that our regions are inside our address space. 
*/ - auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return this->address_space_start <= addr && addr <= this->address_space_end; }; - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->alias_region_start)); - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->alias_region_end)); - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->heap_region_start)); - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->heap_region_end)); - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->stack_region_start)); - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->stack_region_end)); - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->kernel_map_region_start)); - MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(this->kernel_map_region_end)); + auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return m_address_space_start <= addr && addr <= m_address_space_end; }; + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_start)); + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_end)); + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_start)); + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_end)); + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_start)); + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_end)); + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_start)); + MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_end)); /* Ensure that we selected regions that don't overlap. */ - const KProcessAddress alias_start = this->alias_region_start; - const KProcessAddress alias_last = this->alias_region_end - 1; - const KProcessAddress heap_start = this->heap_region_start; - const KProcessAddress heap_last = this->heap_region_end - 1; - const KProcessAddress stack_start = this->stack_region_start; - const KProcessAddress stack_last = this->stack_region_end - 1; - const KProcessAddress kmap_start = this->kernel_map_region_start; - const KProcessAddress kmap_last = this->kernel_map_region_end - 1; + const KProcessAddress alias_start = m_alias_region_start; + const KProcessAddress alias_last = m_alias_region_end - 1; + const KProcessAddress heap_start = m_heap_region_start; + const KProcessAddress heap_last = m_heap_region_end - 1; + const KProcessAddress stack_start = m_stack_region_start; + const KProcessAddress stack_last = m_stack_region_end - 1; + const KProcessAddress kmap_start = m_kernel_map_region_start; + const KProcessAddress kmap_last = m_kernel_map_region_end - 1; MESOSPHERE_ABORT_UNLESS(alias_last < heap_start || heap_last < alias_start); MESOSPHERE_ABORT_UNLESS(alias_last < stack_start || stack_last < alias_start); MESOSPHERE_ABORT_UNLESS(alias_last < kmap_start || kmap_last < alias_start); @@ -265,10 +268,10 @@ namespace ams::kern { MESOSPHERE_ABORT_UNLESS(heap_last < kmap_start || kmap_last < heap_start); /* Initialize our implementation. */ - this->impl.InitializeForProcess(table, GetInteger(start), GetInteger(end)); + m_impl.InitializeForProcess(table, GetInteger(start), GetInteger(end)); /* Initialize our memory block manager. */ - return this->memory_block_manager.Initialize(this->address_space_start, this->address_space_end, this->memory_block_slab_manager); + return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager); return ResultSuccess(); } @@ -276,11 +279,11 @@ namespace ams::kern { void KPageTableBase::Finalize() { /* Finalize memory blocks. 
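   (Aside: the overlap assertions above compare inclusive last addresses (end - 1)
   rather than exclusive ends, which avoids overflow for a region that touches the
   top of the address space. The disjointness test itself is the classic interval
   check, sketched minimally:

       #include <cstdint>

       // Two inclusive ranges [a_start, a_last] and [b_start, b_last] are
       // disjoint exactly when one ends before the other begins.
       constexpr bool Disjoint(std::uint64_t a_start, std::uint64_t a_last,
                               std::uint64_t b_start, std::uint64_t b_last) {
           return a_last < b_start || b_last < a_start;
       }

   Each abort above is one instantiation of this predicate for a pair of the
   alias/heap/stack/kernel-map regions.)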
*/ - this->memory_block_manager.Finalize(this->memory_block_slab_manager); + m_memory_block_manager.Finalize(m_memory_block_slab_manager); /* Free any unsafe mapped memory. */ - if (this->mapped_unsafe_physical_memory) { - Kernel::GetUnsafeMemory().Release(this->mapped_unsafe_physical_memory); + if (m_mapped_unsafe_physical_memory) { + Kernel::GetUnsafeMemory().Release(m_mapped_unsafe_physical_memory); } /* Invalidate the entire instruction cache. */ @@ -291,19 +294,19 @@ namespace ams::kern { switch (state) { case KMemoryState_Free: case KMemoryState_Kernel: - return this->address_space_start; + return m_address_space_start; case KMemoryState_Normal: - return this->heap_region_start; + return m_heap_region_start; case KMemoryState_Ipc: case KMemoryState_NonSecureIpc: case KMemoryState_NonDeviceIpc: - return this->alias_region_start; + return m_alias_region_start; case KMemoryState_Stack: - return this->stack_region_start; + return m_stack_region_start; case KMemoryState_Io: case KMemoryState_Static: case KMemoryState_ThreadLocal: - return this->kernel_map_region_start; + return m_kernel_map_region_start; case KMemoryState_Shared: case KMemoryState_AliasCode: case KMemoryState_AliasCodeData: @@ -312,10 +315,10 @@ namespace ams::kern { case KMemoryState_SharedCode: case KMemoryState_GeneratedCode: case KMemoryState_CodeOut: - return this->alias_code_region_start; + return m_alias_code_region_start; case KMemoryState_Code: case KMemoryState_CodeData: - return this->code_region_start; + return m_code_region_start; MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } @@ -324,19 +327,19 @@ namespace ams::kern { switch (state) { case KMemoryState_Free: case KMemoryState_Kernel: - return this->address_space_end - this->address_space_start; + return m_address_space_end - m_address_space_start; case KMemoryState_Normal: - return this->heap_region_end - this->heap_region_start; + return m_heap_region_end - m_heap_region_start; case KMemoryState_Ipc: case KMemoryState_NonSecureIpc: case KMemoryState_NonDeviceIpc: - return this->alias_region_end - this->alias_region_start; + return m_alias_region_end - m_alias_region_start; case KMemoryState_Stack: - return this->stack_region_end - this->stack_region_start; + return m_stack_region_end - m_stack_region_start; case KMemoryState_Io: case KMemoryState_Static: case KMemoryState_ThreadLocal: - return this->kernel_map_region_end - this->kernel_map_region_start; + return m_kernel_map_region_end - m_kernel_map_region_start; case KMemoryState_Shared: case KMemoryState_AliasCode: case KMemoryState_AliasCodeData: @@ -345,10 +348,10 @@ namespace ams::kern { case KMemoryState_SharedCode: case KMemoryState_GeneratedCode: case KMemoryState_CodeOut: - return this->alias_code_region_end - this->alias_code_region_start; + return m_alias_code_region_end - m_alias_code_region_start; case KMemoryState_Code: case KMemoryState_CodeData: - return this->code_region_end - this->code_region_start; + return m_code_region_end - m_code_region_start; MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } @@ -361,8 +364,8 @@ namespace ams::kern { const size_t region_size = this->GetRegionSize(state); const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1; - const bool is_in_heap = !(end <= this->heap_region_start || this->heap_region_end <= addr); - const bool is_in_alias = !(end <= this->alias_region_start || this->alias_region_end <= addr); + const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr); + const bool is_in_alias = 
!(end <= m_alias_region_start || m_alias_region_end <= addr); switch (state) { case KMemoryState_Free: case KMemoryState_Kernel: @@ -397,9 +400,9 @@ namespace ams::kern { Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const { /* Validate the states match expectation. */ - R_UNLESS((info.state & state_mask) == state, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.perm & perm_mask) == perm, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory()); + R_UNLESS((info.m_state & state_mask) == state, svc::ResultInvalidCurrentMemory()); + R_UNLESS((info.m_perm & perm_mask) == perm, svc::ResultInvalidCurrentMemory()); + R_UNLESS((info.m_attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory()); return ResultSuccess(); } @@ -409,7 +412,7 @@ namespace ams::kern { /* Get information about the first block. */ const KProcessAddress last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); /* If the start address isn't aligned, we need a block. */ @@ -426,7 +429,7 @@ namespace ams::kern { /* Advance our iterator. */ it++; - MESOSPHERE_ASSERT(it != this->memory_block_manager.cend()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -445,21 +448,21 @@ namespace ams::kern { /* Get information about the first block. */ const KProcessAddress last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); /* If the start address isn't aligned, we need a block. */ const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; /* Validate all blocks in the range have correct state. */ - const KMemoryState first_state = info.state; - const KMemoryPermission first_perm = info.perm; - const KMemoryAttribute first_attr = info.attribute; + const KMemoryState first_state = info.m_state; + const KMemoryPermission first_perm = info.m_perm; + const KMemoryAttribute first_attr = info.m_attribute; while (true) { /* Validate the current block. */ - R_UNLESS(info.state == first_state, svc::ResultInvalidCurrentMemory()); - R_UNLESS(info.perm == first_perm, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory()); + R_UNLESS(info.m_state == first_state, svc::ResultInvalidCurrentMemory()); + R_UNLESS(info.m_perm == first_perm, svc::ResultInvalidCurrentMemory()); + R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory()); /* Validate against the provided masks. */ R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); @@ -471,7 +474,7 @@ namespace ams::kern { /* Advance our iterator. */ it++; - MESOSPHERE_ASSERT(it != this->memory_block_manager.cend()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -504,7 +507,7 @@ namespace ams::kern { R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. 
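   (Aside: CheckMemoryState above is a single mask/value predicate applied to three
   fields, which is what lets every caller express requirements like "state must be
   exactly Free" or "these attribute bits must be clear" without bespoke helpers.
   The shape of the test, as a minimal sketch:

       #include <cstdint>

       // A field passes when the bits selected by mask equal value; passing
       // mask = ~0u demands an exact match, while mask = 0 accepts anything.
       constexpr bool FieldMatches(std::uint32_t field, std::uint32_t mask, std::uint32_t value) {
           return (field & mask) == value;
       }

   The contiguous variant walks the block list and additionally requires that state,
   permission, and attribute stay constant across the whole range, modulo the ignored
   attribute bits.)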
*/ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check that the output page group is empty, if it exists. */ if (out_pg) { @@ -529,7 +532,7 @@ namespace ams::kern { } /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* Decide on new perm and attr. */ @@ -546,7 +549,7 @@ namespace ams::kern { } /* Apply the memory block updates. */ - this->memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None); /* If we have an output group, open. */ if (out_pg) { @@ -566,7 +569,7 @@ namespace ams::kern { R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the state. */ KMemoryState old_state; @@ -586,7 +589,7 @@ namespace ams::kern { /* Create an update allocator. */ /* NOTE: Nintendo does not initialize the allocator with any blocks. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(0)); MESOSPHERE_UNUSED(num_allocator_blocks); @@ -600,7 +603,7 @@ namespace ams::kern { } /* Apply the memory block updates. */ - this->memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked); return ResultSuccess(); } @@ -610,7 +613,7 @@ namespace ams::kern { MESOSPHERE_ASSERT(out_info != nullptr); MESOSPHERE_ASSERT(out_page != nullptr); - const KMemoryBlock *block = this->memory_block_manager.FindBlock(address); + const KMemoryBlock *block = m_memory_block_manager.FindBlock(address); R_UNLESS(block != nullptr, svc::ResultInvalidCurrentMemory()); *out_info = block->GetMemoryInfo(); @@ -629,7 +632,7 @@ namespace ams::kern { R_UNLESS((address < address + size), svc::ResultNotFound()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); auto &impl = this->GetImpl(); @@ -690,7 +693,7 @@ namespace ams::kern { Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Validate that the source address's state is valid. */ KMemoryState src_state; @@ -702,11 +705,11 @@ namespace ams::kern { R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator for the source. 
*/ - KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator src_allocator(m_memory_block_slab_manager); R_TRY(src_allocator.Initialize(num_src_allocator_blocks)); /* Create an update allocator for the destination. */ - KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator dst_allocator(m_memory_block_slab_manager); R_TRY(dst_allocator.Initialize(num_dst_allocator_blocks)); /* Map the memory. */ @@ -715,7 +718,7 @@ namespace ams::kern { const size_t num_pages = size / PageSize; /* Create page groups for the memory being unmapped. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); /* Create the page group representing the source. */ R_TRY(this->MakePageGroup(pg, src_address, num_pages)); @@ -743,8 +746,8 @@ namespace ams::kern { unprot_guard.Cancel(); /* Apply the memory block updates. */ - this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_src_perm, new_src_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None); - this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_Stack, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_src_perm, new_src_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_Stack, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); } return ResultSuccess(); @@ -752,7 +755,7 @@ namespace ams::kern { Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Validate that the source address's state is valid. */ KMemoryState src_state; @@ -765,11 +768,11 @@ namespace ams::kern { R_TRY(this->CheckMemoryState(nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Stack, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); /* Create an update allocator for the source. */ - KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator src_allocator(m_memory_block_slab_manager); R_TRY(src_allocator.Initialize(num_src_allocator_blocks)); /* Create an update allocator for the destination. */ - KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator dst_allocator(m_memory_block_slab_manager); R_TRY(dst_allocator.Initialize(num_dst_allocator_blocks)); /* Unmap the memory. */ @@ -778,7 +781,7 @@ namespace ams::kern { const size_t num_pages = size / PageSize; /* Create page groups for the memory being unmapped. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); /* Create the page group representing the destination. 
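   (Aside: the unprot_guard used by MapMemory above, and the remap_guard used by
   UnmapMemory here, are cancelable scope guards: the undo action runs on every early
   error return, and Cancel() disarms it once all fallible steps have succeeded.
   A minimal sketch of the pattern:

       #include <utility>

       // Runs the stored callable on destruction unless Cancel() was called.
       template<typename F>
       class ScopeGuard {
           F m_f;
           bool m_active = true;
       public:
           explicit ScopeGuard(F f) : m_f(std::move(f)) {}
           ~ScopeGuard() { if (m_active) { m_f(); } }
           void Cancel() { m_active = false; }
           ScopeGuard(const ScopeGuard &) = delete;
           ScopeGuard &operator=(const ScopeGuard &) = delete;
       };

   This is why the success paths end with a bare Cancel() call right before the
   memory block updates are applied.)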
*/ R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); @@ -806,8 +809,8 @@ namespace ams::kern { remap_guard.Cancel(); /* Apply the memory block updates. */ - this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked); - this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); } return ResultSuccess(); @@ -818,7 +821,7 @@ namespace ams::kern { R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Verify that the source memory is normal heap. */ KMemoryState src_state; @@ -831,11 +834,11 @@ namespace ams::kern { R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator for the source. */ - KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator src_allocator(m_memory_block_slab_manager); R_TRY(src_allocator.Initialize(num_src_allocator_blocks)); /* Create an update allocator for the destination. */ - KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator dst_allocator(m_memory_block_slab_manager); R_TRY(dst_allocator.Initialize(num_dst_allocator_blocks)); /* Map the code memory. */ @@ -844,7 +847,7 @@ namespace ams::kern { const size_t num_pages = size / PageSize; /* Create page groups for the memory being unmapped. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); /* Create the page group representing the source. */ R_TRY(this->MakePageGroup(pg, src_address, num_pages)); @@ -871,8 +874,8 @@ namespace ams::kern { unprot_guard.Cancel(); /* Apply the memory block updates. 
*/ - this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None); - this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); } return ResultSuccess(); @@ -883,7 +886,7 @@ namespace ams::kern { R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Verify that the source memory is locked normal heap. */ size_t num_src_allocator_blocks; @@ -896,7 +899,7 @@ namespace ams::kern { /* Determine whether any pages being unmapped are code. */ bool any_code_pages = false; { - KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(dst_address); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); while (true) { /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); @@ -931,7 +934,7 @@ namespace ams::kern { const size_t num_pages = size / PageSize; /* Create page groups for the memory being unmapped. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); /* Create the page group representing the destination. */ R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); @@ -940,11 +943,11 @@ namespace ams::kern { R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion()); /* Create an update allocator for the source. */ - KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator src_allocator(m_memory_block_slab_manager); R_TRY(src_allocator.Initialize(num_src_allocator_blocks)); /* Create an update allocator for the destination. */ - KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator dst_allocator(m_memory_block_slab_manager); R_TRY(dst_allocator.Initialize(num_dst_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -967,8 +970,8 @@ namespace ams::kern { remap_guard.Cancel(); /* Apply the memory block updates. 
*/ - this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); - this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked); /* Note that we reprotected pages. */ reprotected_pages = true; @@ -991,7 +994,7 @@ ams::svc::PageInfo page_info; MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(&info, &page_info, candidate)); - if (info.state != KMemoryState_Free) { continue; } + if (info.m_state != KMemoryState_Free) { continue; } if (!(region_start <= candidate)) { continue; } if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; } if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; } @@ -1006,12 +1009,12 @@ /* This may theoretically cause an offset to be chosen that cannot be mapped. */ /* We will account for guard pages. */ const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages - guard_pages); - address = this->memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages); + address = m_memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages); } } /* Find the first free area. */ if (address == Null<KProcessAddress>) { - address = this->memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, alignment, offset, guard_pages); + address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, alignment, offset, guard_pages); } } @@ -1020,11 +1023,11 @@ size_t KPageTableBase::GetSize(KMemoryState state) const { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Iterate, counting blocks with the desired state. */ size_t total_size = 0; - for (KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(this->address_space_start); it != this->memory_block_manager.end(); ++it) { + for (KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(m_address_space_start); it != m_memory_block_manager.end(); ++it) { /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); if (info.GetState() == state) { @@ -1055,17 +1058,17 @@ MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); /* Create a page group to hold the pages we allocate. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); /* Allocate the pages. 
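   (Aside: FindFreeArea above implements ASLR-friendly placement in two steps: probe a
   random offset into the candidate region first, and only if that probe fails fall
   back to a deterministic first-fit scan from the region start. A minimal sketch of
   the control flow, with a hypothetical FirstFit standing in for the block-manager
   search and 0 standing in for the null address:

       #include <cstddef>

       using FirstFitFn = std::size_t (*)(std::size_t start, std::size_t pages);

       std::size_t ChooseAddress(FirstFitFn FirstFit, std::size_t region_start,
                                 std::size_t region_pages, std::size_t random_offset) {
           std::size_t address = FirstFit(region_start + random_offset,
                                          region_pages - random_offset);
           if (address == 0) {
               address = FirstFit(region_start, region_pages);
           }
           return address;
       }

   The guard-page accounting in the real code shrinks the random range so a candidate
   near the end of the region cannot overflow it.)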
*/ - R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, this->allocate_option)); + R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option)); /* Ensure that the page group is closed when we're done working with it. */ ON_SCOPE_EXIT { pg.Close(); }; /* Clear all pages. */ for (const auto &it : pg) { - std::memset(GetVoidPointer(it.GetAddress()), this->heap_fill_value, it.GetSize()); + std::memset(GetVoidPointer(it.GetAddress()), m_heap_fill_value, it.GetSize()); } /* Map the pages. */ @@ -1121,10 +1124,10 @@ KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); size_t pg_pages = pg_it->GetNumPages(); - auto it = this->memory_block_manager.FindIterator(start_address); + auto it = m_memory_block_manager.FindIterator(start_address); while (true) { /* Check that the iterator is valid. */ - MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); @@ -1322,7 +1325,7 @@ const size_t num_pages = size / PageSize; /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Verify we can change the memory permission. */ KMemoryState old_state; @@ -1335,7 +1338,7 @@ R_SUCCEED_IF(old_perm == new_perm); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -1346,7 +1349,7 @@ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, old_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(&allocator, addr, num_pages, old_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); return ResultSuccess(); } @@ -1355,7 +1358,7 @@ const size_t num_pages = size / PageSize; /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Verify we can change the memory permission. */ KMemoryState old_state; @@ -1364,7 +1367,7 @@ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, std::addressof(num_allocator_blocks), addr, size, KMemoryState_FlagCode, KMemoryState_FlagCode, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); /* Make a new page group for the region. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); /* Determine new perm/state. */ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); @@ -1391,7 +1394,7 @@ R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); /* Create an update allocator. 
*/ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -1403,7 +1406,7 @@ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, operation, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, new_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(&allocator, addr, num_pages, new_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); /* Ensure cache coherency, if we're setting pages as executable. */ if (is_x) { @@ -1421,7 +1424,7 @@ MESOSPHERE_ASSERT((mask | KMemoryAttribute_SetMask) == KMemoryAttribute_SetMask); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Verify we can change the memory attribute. */ KMemoryState old_state; @@ -1436,7 +1439,7 @@ AttributeTestMask, KMemoryAttribute_None, ~AttributeTestMask)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -1450,70 +1453,70 @@ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissionsAndRefresh, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, old_state, old_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(&allocator, addr, num_pages, old_state, old_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); return ResultSuccess(); } Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) { /* Lock the physical memory mutex. */ - KScopedLightLock map_phys_mem_lk(this->map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); /* Try to perform a reduction in heap, instead of an extension. */ KProcessAddress cur_address; size_t allocation_size; { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Validate that setting heap size is possible at all. */ - R_UNLESS(!this->is_kernel, svc::ResultOutOfMemory()); - R_UNLESS(size <= static_cast<size_t>(this->heap_region_end - this->heap_region_start), svc::ResultOutOfMemory()); - R_UNLESS(size <= this->max_heap_size, svc::ResultOutOfMemory()); + R_UNLESS(!m_is_kernel, svc::ResultOutOfMemory()); + R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), svc::ResultOutOfMemory()); + R_UNLESS(size <= m_max_heap_size, svc::ResultOutOfMemory()); - if (size < static_cast<size_t>(this->current_heap_end - this->heap_region_start)) { + if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) { /* The size being requested is less than the current size, so we need to free the end of the heap. */ /* Validate memory state. 
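   (Aside: SetHeapSize above is a three-way comparison against the currently mapped
   heap extent, and this branch is the shrink case. The decision itself, sketched
   minimally:

       #include <cstddef>

       enum class HeapOp { Shrink, Keep, Grow };

       // requested is compared with (current_heap_end - heap_region_start).
       constexpr HeapOp Classify(std::size_t requested, std::size_t current) {
           if (requested < current) { return HeapOp::Shrink; }
           if (requested == current) { return HeapOp::Keep; }
           return HeapOp::Grow;
       }

   Shrinking frees the tail of the heap under the table lock; growing records how much
   to allocate and drops the lock before touching the memory manager, which is why the
   code re-checks current_heap_end after reacquiring it.)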
*/ size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), - this->heap_region_start + size, (this->current_heap_end - this->heap_region_start) - size, + m_heap_region_start + size, (m_current_heap_end - m_heap_region_start) - size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ KScopedPageTableUpdater updater(this); /* Unmap the end of the heap. */ - const size_t num_pages = ((this->current_heap_end - this->heap_region_start) - size) / PageSize; + const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize; const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None }; - R_TRY(this->Operate(updater.GetPageList(), this->heap_region_start + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false)); + R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false)); /* Release the memory from the resource limit. */ GetCurrentProcess().ReleaseResource(ams::svc::LimitableResource_PhysicalMemoryMax, num_pages * PageSize); /* Apply the memory block update. */ - this->memory_block_manager.Update(std::addressof(allocator), this->heap_region_start + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None); /* Update the current heap end. */ - this->current_heap_end = this->heap_region_start + size; + m_current_heap_end = m_heap_region_start + size; /* Set the output. */ - *out = this->heap_region_start; + *out = m_heap_region_start; return ResultSuccess(); - } else if (size == static_cast<size_t>(this->current_heap_end - this->heap_region_start)) { + } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) { /* The size requested is exactly the current size. */ - *out = this->heap_region_start; + *out = m_heap_region_start; return ResultSuccess(); } else { /* We have to allocate memory. Determine how much to allocate and where while the table is locked. */ - cur_address = this->current_heap_end; - allocation_size = size - (this->current_heap_end - this->heap_region_start); + cur_address = m_current_heap_end; + allocation_size = size - (m_current_heap_end - m_heap_region_start); } } @@ -1522,8 +1525,8 @@ R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); /* Allocate pages for the heap extension. 
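   (Aside: memory_reservation above follows the reserve/commit idiom: the reservation
   is taken before any work, released automatically if any later step fails, and
   committed only after the mapping succeeds. A minimal sketch of the shape, with
   placeholder release logic:

       #include <cstddef>

       class ScopedReservation {
           std::size_t m_amount;
           bool m_committed = false;
       public:
           explicit ScopedReservation(std::size_t amount) : m_amount(amount) {}
           ~ScopedReservation() { if (!m_committed) { Release(m_amount); } }
           void Commit() { m_committed = true; }
       private:
           static void Release(std::size_t) {} // stand-in for the resource limit
       };

   The same idea appears in the page group's open/close handling below: on failure the
   extra references are never taken, so the freshly allocated pages free themselves.)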
*/ - KPageGroup pg(this->block_info_manager); - R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, this->allocate_option)); + KPageGroup pg(m_block_info_manager); + R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, m_allocate_option)); /* Close the opened pages when we're done with them. */ /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */ @@ -1531,23 +1534,23 @@ namespace ams::kern { /* Clear all the newly allocated pages. */ for (const auto &it : pg) { - std::memset(GetVoidPointer(it.GetAddress()), this->heap_fill_value, it.GetSize()); + std::memset(GetVoidPointer(it.GetAddress()), m_heap_fill_value, it.GetSize()); } /* Map the pages. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Ensure that the heap hasn't changed since we began executing. */ - MESOSPHERE_ABORT_UNLESS(cur_address == this->current_heap_end); + MESOSPHERE_ABORT_UNLESS(cur_address == m_current_heap_end); /* Check the memory state. */ size_t num_allocator_blocks; - R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), this->current_heap_end, allocation_size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, allocation_size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -1555,32 +1558,32 @@ namespace ams::kern { /* Map the pages. */ const size_t num_pages = allocation_size / PageSize; - const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (this->current_heap_end == this->heap_region_start) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None }; - R_TRY(this->Operate(updater.GetPageList(), this->current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false)); + const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (m_current_heap_end == m_heap_region_start) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None }; + R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false)); /* We succeeded, so commit our memory reservation. */ memory_reservation.Commit(); /* Apply the memory block update. */ - this->memory_block_manager.Update(std::addressof(allocator), this->current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, this->heap_region_start == this->current_heap_end ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, m_heap_region_start == m_current_heap_end ? 
KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); /* Update the current heap end. */ - this->current_heap_end = this->heap_region_start + size; + m_current_heap_end = m_heap_region_start + size; /* Set the output. */ - *out = this->heap_region_start; + *out = m_heap_region_start; return ResultSuccess(); } } Result KPageTableBase::SetMaxHeapSize(size_t size) { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Only process page tables are allowed to set heap size. */ MESOSPHERE_ASSERT(!this->IsKernel()); - this->max_heap_size = size; + m_max_heap_size = size; return ResultSuccess(); } @@ -1589,18 +1592,18 @@ /* If the address is invalid, create a fake block. */ if (!this->Contains(addr, 1)) { *out_info = { - .address = GetInteger(this->address_space_end), - .size = 0 - GetInteger(this->address_space_end), - .state = static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible), - .device_disable_merge_left_count = 0, - .device_disable_merge_right_count = 0, - .ipc_lock_count = 0, - .device_use_count = 0, - .ipc_disable_merge_count = 0, - .perm = KMemoryPermission_None, - .attribute = KMemoryAttribute_None, - .original_perm = KMemoryPermission_None, - .disable_merge_attribute = KMemoryBlockDisableMergeAttribute_None, + .m_address = GetInteger(m_address_space_end), + .m_size = 0 - GetInteger(m_address_space_end), + .m_state = static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible), + .m_device_disable_merge_left_count = 0, + .m_device_disable_merge_right_count = 0, + .m_ipc_lock_count = 0, + .m_device_use_count = 0, + .m_ipc_disable_merge_count = 0, + .m_perm = KMemoryPermission_None, + .m_attribute = KMemoryAttribute_None, + .m_original_perm = KMemoryPermission_None, + .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute_None, }; out_page_info->flags = 0; @@ -1608,13 +1611,13 @@ } /* Otherwise, lock the table and query. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); return this->QueryInfoImpl(out_info, out_page_info, addr); } Result KPageTableBase::QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Align the address down to page size. */ address = util::AlignDown(GetInteger(address), PageSize); @@ -1639,7 +1642,7 @@ /* Begin traversal. */ TraversalContext context; TraversalEntry next_entry; - bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); + bool traverse_valid = m_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory()); /* Set tracking variables. */ @@ -1649,7 +1652,7 @@ /* Iterate. */ while (true) { /* Continue the traversal. */ - traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + traverse_valid = m_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); if (!traverse_valid) { break; } @@ -1728,7 +1731,7 @@ }; /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Select an address to map at. 
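*/

A note on the fake block above: `.m_size = 0 - GetInteger(m_address_space_end)` relies on unsigned wraparound, so the synthetic inaccessible region spans from the end of the address space up to the top of the 64-bit range. A standalone check of the arithmetic (the constant is illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint64_t address_space_end = 0x0000008000000000; /* illustrative */
        const std::uint64_t fake_size = 0 - address_space_end;      /* wraps mod 2^64 */
        assert(address_space_end + fake_size == 0);                 /* block covers everything
                                                                       above the address space */
        return 0;
    }

/* Select an address to map at.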
*/ KProcessAddress addr = Null<KProcessAddress>; @@ -1751,7 +1754,7 @@ MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(KMemoryBlockManagerUpdateAllocator::MaxBlocks)); /* We're going to perform an update, so create a helper. */ @@ -1762,7 +1765,7 @@ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, KMemoryState_Io, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(&allocator, addr, num_pages, KMemoryState_Io, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); /* We successfully mapped the pages. */ return ResultSuccess(); @@ -1795,7 +1798,7 @@ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Select an address to map at. */ KProcessAddress addr = Null<KProcessAddress>; @@ -1818,7 +1821,7 @@ MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(KMemoryBlockManagerUpdateAllocator::MaxBlocks)); /* We're going to perform an update, so create a helper. */ @@ -1829,7 +1832,7 @@ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, KMemoryState_Static, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(&allocator, addr, num_pages, KMemoryState_Static, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); /* We successfully mapped the pages. */ return ResultSuccess(); @@ -1859,7 +1862,7 @@ R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Find a random address to map at. */ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages()); @@ -1869,7 +1872,7 @@ MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. 
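*/

FindFreeArea (used above with this->GetNumGuardPages()) must leave guard pages around any candidate mapping. A minimal first-fit sketch under simplifying assumptions (a sorted, non-overlapping list of used ranges inside the region; names hypothetical) — the kernel additionally randomizes the chosen slot, which this sketch does not:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr std::size_t PageSize = 4096;

    struct Range { std::uintptr_t start; std::size_t num_pages; };

    /* used: sorted, non-overlapping, fully inside the region. Returns 0 on failure. */
    std::uintptr_t FindFreeArea(const std::vector<Range> &used,
                                std::uintptr_t region_start, std::size_t region_num_pages,
                                std::size_t num_pages, std::size_t guard_pages) {
        const std::uintptr_t region_end = region_start + region_num_pages * PageSize;
        const std::size_t needed = (num_pages + 2 * guard_pages) * PageSize;
        std::uintptr_t cursor = region_start;

        for (const auto &r : used) {
            if (cursor + needed <= r.start) {
                return cursor + guard_pages * PageSize;  /* hole before this range fits */
            }
            cursor = std::max(cursor, r.start + r.num_pages * PageSize);
        }
        if (cursor + needed <= region_end) {
            return cursor + guard_pages * PageSize;      /* hole after the last range */
        }
        return 0;
    }

/* Create an update allocator.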
*/ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(KMemoryBlockManagerUpdateAllocator::MaxBlocks)); /* We're going to perform an update, so create a helper. */ @@ -1884,7 +1887,7 @@ } /* Update the blocks. */ - this->memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); /* We successfully mapped the pages. */ *out_addr = addr; @@ -1897,14 +1900,14 @@ R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the memory state. */ size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -1914,7 +1917,7 @@ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm)); /* Update the blocks. */ - this->memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); return ResultSuccess(); } @@ -1925,14 +1928,14 @@ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the memory state. */ size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -1943,7 +1946,7 @@ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false)); /* Update the blocks. 
*/ - this->memory_block_manager.Update(&allocator, address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); + m_memory_block_manager.Update(&allocator, address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); return ResultSuccess(); } @@ -1957,7 +1960,7 @@ namespace ams::kern { R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Find a random address to map at. */ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, 0, this->GetNumGuardPages()); @@ -1966,7 +1969,7 @@ namespace ams::kern { MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(KMemoryBlockManagerUpdateAllocator::MaxBlocks)); /* We're going to perform an update, so create a helper. */ @@ -1977,7 +1980,7 @@ namespace ams::kern { R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); /* We successfully mapped the pages. */ *out_addr = addr; @@ -1993,14 +1996,14 @@ namespace ams::kern { R_UNLESS(this->CanContain(addr, size, state), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check if state allows us to map. */ size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -2011,7 +2014,7 @@ namespace ams::kern { R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); /* We successfully mapped the pages. */ return ResultSuccess(); @@ -2026,7 +2029,7 @@ namespace ams::kern { R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory()); /* Lock the table. 
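*/

The recurring allocator.Initialize(num_allocator_blocks) step above preallocates every memory-block node the coming update could need, so the update itself cannot fail halfway through. A minimal sketch of that reserve-then-commit idea (hypothetical types, not the kernel's slab-backed allocator):

    #include <cstddef>
    #include <new>
    #include <vector>

    struct BlockNode { /* one memory-block record */ };

    class UpdateAllocator {
      private:
        std::vector<BlockNode *> m_reserved;
      public:
        ~UpdateAllocator() { for (auto *n : m_reserved) { delete n; } }

        bool Initialize(std::size_t num_blocks) {
            /* Every allocation the update might need happens here, up front. */
            m_reserved.reserve(num_blocks);
            for (std::size_t i = 0; i < num_blocks; ++i) {
                BlockNode *n = new (std::nothrow) BlockNode;
                if (n == nullptr) { return false; }   /* fail before mutating anything */
                m_reserved.push_back(n);
            }
            return true;
        }

        BlockNode *Allocate() {
            /* Called during the update itself; cannot fail by construction. */
            BlockNode *n = m_reserved.back();
            m_reserved.pop_back();
            return n;
        }
    };

/* Lock the table.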
*/ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check if state allows us to unmap. */ size_t num_allocator_blocks; @@ -2036,7 +2039,7 @@ R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), svc::ResultInvalidCurrentMemory()); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -2047,7 +2050,7 @@ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_Unmap, false)); /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); + m_memory_block_manager.Update(&allocator, address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); return ResultSuccess(); } @@ -2061,7 +2064,7 @@ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check if state allows us to create the group. */ R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr)); @@ -2080,7 +2083,7 @@ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the memory state. */ R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_Uncached, KMemoryAttribute_None)); @@ -2147,7 +2150,7 @@ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Require that the memory either be user readable or debuggable. */ const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)); @@ -2229,7 +2232,7 @@ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Require that the memory either be user writable or debuggable. */ const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None)); @@ -2318,7 +2321,7 @@ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the memory state. */ const u32 test_state = (is_aligned ? 
KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap); @@ -2331,11 +2334,11 @@ namespace ams::kern { } /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* Update the memory blocks. */ - this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None); + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None); /* Open the page group. */ if (out != nullptr) { @@ -2351,7 +2354,7 @@ namespace ams::kern { R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the memory state. */ size_t num_allocator_blocks; @@ -2362,11 +2365,11 @@ namespace ams::kern { KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* Update the memory blocks. */ - this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission_None); + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission_None); return ResultSuccess(); } @@ -2377,7 +2380,7 @@ namespace ams::kern { R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the memory state. */ size_t num_allocator_blocks; @@ -2388,15 +2391,15 @@ namespace ams::kern { KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* Make the page group. */ R_TRY(this->MakePageGroup(*out, address, num_pages)); /* Update the memory blocks. */ - const KMemoryBlockManager::MemoryBlockLockFunction lock_func = this->enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; - this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None); + const KMemoryBlockManager::MemoryBlockLockFunction lock_func = m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None); /* Open a reference to the pages in the page group. */ out->Open(); @@ -2410,7 +2413,7 @@ namespace ams::kern { R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Determine useful extents. 
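*/

UpdateLock above selects the state transition by passing a pointer to member function such as &KMemoryBlock::ShareToDevice. A small self-contained example of that dispatch style (the permission parameter of the real MemoryBlockLockFunction signature is omitted):

    #include <vector>

    struct Block {
        int device_use_count = 0;
        void ShareToDevice()   { ++device_use_count; }
        void UnshareToDevice() { --device_use_count; }
    };

    using MemoryBlockLockFunction = void (Block::*)();

    void UpdateLock(std::vector<Block> &blocks, MemoryBlockLockFunction lock_func) {
        for (auto &block : blocks) {
            (block.*lock_func)();   /* apply the chosen transition to each block */
        }
    }

    int main() {
        std::vector<Block> blocks(4);
        UpdateLock(blocks, &Block::ShareToDevice);
        UpdateLock(blocks, &Block::UnshareToDevice);
        return 0;
    }

/* Determine useful extents.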
*/ const KProcessAddress mapped_end_address = address + mapped_size; @@ -2419,7 +2422,7 @@ namespace ams::kern { /* Check memory state. */ size_t allocator_num_blocks = 0, unmapped_allocator_num_blocks = 0; if (unmapped_size) { - if (this->enable_device_address_space_merge) { + if (m_enable_device_address_space_merge) { R_TRY(this->CheckMemoryState(std::addressof(allocator_num_blocks), address, size, KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap, @@ -2440,11 +2443,11 @@ namespace ams::kern { } /* Create an update allocator for the region. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(allocator_num_blocks)); /* Create an update allocator for the unmapped region. */ - KMemoryBlockManagerUpdateAllocator unmapped_allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator unmapped_allocator(m_memory_block_slab_manager); R_TRY(unmapped_allocator.Initialize(unmapped_allocator_num_blocks)); /* Determine parameters for the update lock call. */ @@ -2454,8 +2457,8 @@ namespace ams::kern { KMemoryBlockManager::MemoryBlockLockFunction lock_func; if (unmapped_size) { /* If device address space merge is enabled, update tracking appropriately. */ - if (this->enable_device_address_space_merge) { - this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareLeft, KMemoryPermission_None); + if (m_enable_device_address_space_merge) { + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareLeft, KMemoryPermission_None); } lock_allocator = std::addressof(unmapped_allocator); @@ -2466,7 +2469,7 @@ namespace ams::kern { lock_allocator = std::addressof(allocator); lock_address = address; lock_num_pages = num_pages; - if (this->enable_device_address_space_merge) { + if (m_enable_device_address_space_merge) { lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare; } else { lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight; @@ -2474,7 +2477,7 @@ namespace ams::kern { } /* Update the memory blocks. */ - this->memory_block_manager.UpdateLock(lock_allocator, lock_address, lock_num_pages, lock_func, KMemoryPermission_None); + m_memory_block_manager.UpdateLock(lock_allocator, lock_address, lock_num_pages, lock_func, KMemoryPermission_None); return ResultSuccess(); } @@ -2540,7 +2543,7 @@ namespace ams::kern { /* Copy the memory. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check memory state. */ R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr)); @@ -2620,7 +2623,7 @@ namespace ams::kern { /* Copy the memory. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check memory state. */ R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr)); @@ -2689,7 +2692,7 @@ namespace ams::kern { /* Copy the memory. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check memory state. 
*/ R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr)); @@ -2769,7 +2772,7 @@ /* Copy the memory. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check memory state. */ R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr)); @@ -2842,8 +2845,8 @@ /* Copy the memory. */ { /* Get the table locks. */ - KLightLock &lock_0 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? src_page_table.general_lock : dst_page_table.general_lock; - KLightLock &lock_1 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? dst_page_table.general_lock : src_page_table.general_lock; + KLightLock &lock_0 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? src_page_table.m_general_lock : dst_page_table.m_general_lock; + KLightLock &lock_1 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? dst_page_table.m_general_lock : src_page_table.m_general_lock; /* Lock the first lock. */ KScopedLightLock lk0(lock_0); @@ -2969,8 +2972,8 @@ /* Copy the memory. */ { /* Get the table locks. */ - KLightLock &lock_0 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? src_page_table.general_lock : dst_page_table.general_lock; - KLightLock &lock_1 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? dst_page_table.general_lock : src_page_table.general_lock; + KLightLock &lock_0 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? src_page_table.m_general_lock : dst_page_table.m_general_lock; + KLightLock &lock_1 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? dst_page_table.m_general_lock : src_page_table.m_general_lock; /* Lock the first lock. */ KScopedLightLock lk0(lock_0); @@ -3141,7 +3144,7 @@ size_t blocks_needed = 0; /* Iterate, mapping as needed. */ - KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(aligned_src_start); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); while (true) { const KMemoryInfo info = it->GetMemoryInfo(); @@ -3179,7 +3182,7 @@ /* Advance. */ ++it; - MESOSPHERE_ABORT_UNLESS(it != this->memory_block_manager.end()); + MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end()); } /* We succeeded, so no need to cleanup. */ @@ -3198,8 +3201,8 @@ MESOSPHERE_ASSERT(src_page_table.IsLockedByCurrentThread()); /* Check that we can theoretically map. */ - const KProcessAddress region_start = this->alias_region_start; - const size_t region_size = this->alias_region_end - this->alias_region_start; + const KProcessAddress region_start = m_alias_region_start; + const size_t region_size = m_alias_region_end - m_alias_region_start; R_UNLESS(size < region_size, svc::ResultOutOfAddressSpace()); /* Get aligned source extents. 
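*/

The lock_0/lock_1 selection above is address-ordered locking: both tables' locks are always taken in ascending address order, so two threads copying between the same pair of tables in opposite directions cannot deadlock. A minimal sketch with std::mutex standing in for KLightLock (the same-table case is skipped explicitly here rather than relied on):

    #include <cstdint>
    #include <memory>
    #include <mutex>

    struct PageTable { std::mutex general_lock; };

    void LockBoth(PageTable &src, PageTable &dst) {
        std::mutex &lock_0 = (reinterpret_cast<std::uintptr_t>(std::addressof(src)) <=
                              reinterpret_cast<std::uintptr_t>(std::addressof(dst)))
                                 ? src.general_lock : dst.general_lock;
        std::mutex &lock_1 = (reinterpret_cast<std::uintptr_t>(std::addressof(src)) <=
                              reinterpret_cast<std::uintptr_t>(std::addressof(dst)))
                                 ? dst.general_lock : src.general_lock;
        lock_0.lock();                                   /* lower address always first */
        if (std::addressof(lock_0) != std::addressof(lock_1)) {
            lock_1.lock();                               /* skip if src and dst coincide */
        }
    }

/* Get aligned source extents.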
*/ @@ -3229,7 +3232,7 @@ MESOSPHERE_ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(KMemoryBlockManagerUpdateAllocator::MaxBlocks)); /* We're going to perform an update, so create a helper. */ @@ -3264,13 +3267,13 @@ /* Allocate the start page as needed. */ if (aligned_src_start < mapping_src_start) { - start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, this->allocate_option); + start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option); R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory()); } /* Allocate the end page as needed. */ if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { - end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, this->allocate_option); + end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option); R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory()); } @@ -3278,7 +3281,7 @@ auto &src_impl = src_page_table.GetImpl(); /* Get the fill value for partial pages. */ - const auto fill_val = this->ipc_fill_value; + const auto fill_val = m_ipc_fill_value; /* Begin traversal. */ TraversalContext context; @@ -3392,7 +3395,7 @@ } /* Update memory blocks to reflect our changes */ - this->memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, dst_state, test_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, dst_state, test_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); /* Set the output address. */ *out_addr = dst_addr + (src_start - aligned_src_start); @@ -3408,8 +3411,8 @@ KPageTableBase &dst_page_table = *this; /* Get the table locks. */ - KLightLock &lock_0 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? src_page_table.general_lock : dst_page_table.general_lock; - KLightLock &lock_1 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? dst_page_table.general_lock : src_page_table.general_lock; + KLightLock &lock_0 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? src_page_table.m_general_lock : dst_page_table.m_general_lock; + KLightLock &lock_1 = (reinterpret_cast<uintptr_t>(std::addressof(src_page_table)) <= reinterpret_cast<uintptr_t>(std::addressof(dst_page_table))) ? dst_page_table.m_general_lock : src_page_table.m_general_lock; /* Lock the first lock. */ KScopedLightLock lk0(lock_0); @@ -3428,7 +3431,7 @@ R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), std::addressof(num_allocator_blocks), src_addr, size, test_perm, dst_state)); /* Create an update allocator. 
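*/

The start/end partial pages above exist because an IPC source range need not be page-aligned: whole pages are shared in place, while the head and tail pages are copied into freshly allocated pages whose non-shared bytes are set to the fill value. A sketch of the extent arithmetic (addresses illustrative):

    #include <cstdint>
    #include <cstdio>

    constexpr std::uintptr_t PageSize = 4096;

    constexpr std::uintptr_t AlignDown(std::uintptr_t v) { return v & ~(PageSize - 1); }
    constexpr std::uintptr_t AlignUp(std::uintptr_t v)   { return AlignDown(v + PageSize - 1); }

    int main() {
        const std::uintptr_t src_start = 0x10000123, src_end = 0x10003FF0; /* unaligned */
        const std::uintptr_t aligned_start = AlignDown(src_start); /* page holding the head    */
        const std::uintptr_t aligned_end   = AlignUp(src_end);     /* page just past the tail  */
        const std::uintptr_t mapping_start = AlignUp(src_start);   /* first fully shared page  */
        const std::uintptr_t mapping_end   = AlignDown(src_end);   /* end of fully shared pages */
        /* Pages in [mapping_start, mapping_end) are mapped in place; the head page
           [aligned_start, mapping_start) and tail page [mapping_end, aligned_end)
           are copied into fresh pages with the non-shared bytes set to fill_val. */
        std::printf("head partial: %d, tail partial: %d\n",
                    aligned_start != mapping_start, aligned_end != mapping_end);
        return 0;
    }

/* Create an update allocator.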
*/ - KMemoryBlockManagerUpdateAllocator allocator(src_page_table.memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(src_page_table.m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* Get the mapped extents. */ @@ -3450,7 +3453,7 @@ /* If anything was mapped, ipc-lock the pages. */ if (src_map_start < src_map_end) { /* Get the source permission. */ - src_page_table.memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, (src_map_end - src_map_start) / PageSize, &KMemoryBlock::LockForIpc, src_perm); + src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, (src_map_end - src_map_start) / PageSize, &KMemoryBlock::LockForIpc, src_perm); } /* We succeeded, so cancel our cleanup guard. */ @@ -3464,14 +3467,14 @@ R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Validate the memory state. */ size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, dst_state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_All, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -3488,7 +3491,7 @@ R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false)); /* Update memory blocks. */ - this->memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); + m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); /* Release from the resource limit as relevant. */ if (auto *resource_limit = server_process->GetResourceLimit(); resource_limit != nullptr) { @@ -3536,7 +3539,7 @@ /* Lock the table. */ /* NOTE: Nintendo does this *after* creating the updater below, but this does not follow convention elsewhere in KPageTableBase. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* We're going to perform an update, so create a helper. */ KScopedPageTableUpdater updater(this); @@ -3550,7 +3553,7 @@ const auto mapped_last = mapped_end - 1; /* Get current and next iterators. */ - KMemoryBlockManager::const_iterator start_it = this->memory_block_manager.FindIterator(mapping_start); + KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start); KMemoryBlockManager::const_iterator next_it = start_it; ++next_it; @@ -3566,7 +3569,7 @@ while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) { /* Check that we have a next block. */ - MESOSPHERE_ABORT_UNLESS(next_it != this->memory_block_manager.end()); + MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end()); /* Get the next info. 
*/ const KMemoryInfo next_info = next_it->GetMemoryInfo(); @@ -3608,7 +3611,7 @@ namespace ams::kern { /* Iterate, reprotecting as needed. */ { /* Get current and next iterators. */ - KMemoryBlockManager::const_iterator start_it = this->memory_block_manager.FindIterator(mapping_start); + KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start); KMemoryBlockManager::const_iterator next_it = start_it; ++next_it; @@ -3625,7 +3628,7 @@ namespace ams::kern { while ((cur_address + cur_size - 1) < mapping_last) { /* Check that we have a next block. */ - MESOSPHERE_ABORT_UNLESS(next_it != this->memory_block_manager.end()); + MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end()); /* Get the next info. */ const KMemoryInfo next_info = next_it->GetMemoryInfo(); @@ -3663,7 +3666,7 @@ namespace ams::kern { } /* Process the last block. */ - const auto lock_count = cur_info.GetIpcLockCount() + (next_it != this->memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0); + const auto lock_count = cur_info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0); if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { const DisableMergeAttribute head_body_attr = first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None; const DisableMergeAttribute tail_attr = lock_count == 1 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None; @@ -3674,11 +3677,11 @@ namespace ams::kern { /* Create an update allocator. */ /* NOTE: Guaranteed zero blocks needed here. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(0)); /* Unlock the pages. */ - this->memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, KMemoryPermission_None); + m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, KMemoryPermission_None); /* We succeeded, so no need to unmap. */ unmap_guard.Cancel(); @@ -3700,7 +3703,7 @@ namespace ams::kern { MESOSPHERE_ASSERT(src_map_end > src_map_start); /* Iterate over blocks, fixing permissions. */ - KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(address); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); while (true) { const KMemoryInfo info = it->GetMemoryInfo(); @@ -3721,7 +3724,7 @@ namespace ams::kern { auto next_it = it; ++next_it; - const auto lock_count = info.GetIpcLockCount() + (next_it != this->memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0); + const auto lock_count = info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0); tail_attr = lock_count == 0 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None; } else { tail_attr = DisableMergeAttribute_None; @@ -3739,7 +3742,7 @@ namespace ams::kern { /* Advance. 
*/ ++it; - MESOSPHERE_ABORT_UNLESS(it != this->memory_block_manager.end()); + MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end()); } } @@ -3747,7 +3750,7 @@ namespace ams::kern { Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) { /* Lock the physical memory lock. */ - KScopedLightLock phys_lk(this->map_physical_memory_lock); + KScopedLightLock phys_lk(m_map_physical_memory_lock); /* Calculate the last address for convenience. */ const KProcessAddress last_address = address + size - 1; @@ -3761,16 +3764,16 @@ namespace ams::kern { /* Check if the memory is already mapped. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Iterate over the memory. */ cur_address = address; mapped_size = 0; - auto it = this->memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { /* Check that the iterator is valid. */ - MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); @@ -3804,8 +3807,8 @@ namespace ams::kern { R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); /* Allocate pages for the new memory. */ - KPageGroup pg(this->block_info_manager); - R_TRY(Kernel::GetMemoryManager().AllocateAndOpenForProcess(std::addressof(pg), (size - mapped_size) / PageSize, this->allocate_option, GetCurrentProcess().GetId(), this->heap_fill_value)); + KPageGroup pg(m_block_info_manager); + R_TRY(Kernel::GetMemoryManager().AllocateAndOpenForProcess(std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option, GetCurrentProcess().GetId(), m_heap_fill_value)); /* Close our reference when we're done. */ ON_SCOPE_EXIT { pg.Close(); }; @@ -3813,7 +3816,7 @@ namespace ams::kern { /* Map the memory. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); size_t num_allocator_blocks = 0; @@ -3823,10 +3826,10 @@ namespace ams::kern { size_t checked_mapped_size = 0; cur_address = address; - auto it = this->memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { /* Check that the iterator is valid. */ - MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); @@ -3868,7 +3871,7 @@ namespace ams::kern { /* Create an update allocator. */ MESOSPHERE_ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -3883,10 +3886,10 @@ namespace ams::kern { /* Iterate, unmapping the pages. */ cur_address = address; - auto it = this->memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { /* Check that the iterator is valid. */ - MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); /* Get the memory info. 
*/ const KMemoryInfo info = it->GetMemoryInfo(); @@ -3918,10 +3921,10 @@ namespace ams::kern { KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); size_t pg_pages = pg_it->GetNumPages(); - auto it = this->memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { /* Check that the iterator is valid. */ - MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); @@ -3972,10 +3975,10 @@ namespace ams::kern { memory_reservation.Commit(); /* Increase our tracked mapped size. */ - this->mapped_physical_memory_size += (size - mapped_size); + m_mapped_physical_memory_size += (size - mapped_size); /* Update the relevant memory blocks. */ - this->memory_block_manager.UpdateIfMatch(std::addressof(allocator), address, size / PageSize, + m_memory_block_manager.UpdateIfMatch(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None); @@ -3990,10 +3993,10 @@ namespace ams::kern { Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) { /* Lock the physical memory lock. */ - KScopedLightLock phys_lk(this->map_physical_memory_lock); + KScopedLightLock phys_lk(m_map_physical_memory_lock); /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Calculate the last address for convenience. */ const KProcessAddress last_address = address + size - 1; @@ -4009,10 +4012,10 @@ namespace ams::kern { cur_address = address; mapped_size = 0; - auto it = this->memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { /* Check that the iterator is valid. */ - MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); @@ -4056,7 +4059,7 @@ namespace ams::kern { } /* Make a page group for the unmap region. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); { auto &impl = this->GetImpl(); @@ -4105,7 +4108,7 @@ namespace ams::kern { /* Create an update allocator. */ MESOSPHERE_ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -4123,7 +4126,7 @@ namespace ams::kern { cur_address = address; /* Iterate over the memory we unmapped. */ - auto it = this->memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); auto pg_it = pg.begin(); KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); size_t pg_pages = pg_it->GetNumPages(); @@ -4176,10 +4179,10 @@ namespace ams::kern { }; /* Iterate over the memory, unmapping as we go. */ - auto it = this->memory_block_manager.FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { /* Check that the iterator is valid. 
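*/

The FindIterator loops above walk the block list to total how much of the target range is already mapped. A sketch of that walk over a simplified block map (std::map stands in for KMemoryBlockManager; it assumes the queried range lies inside the tracked address space, which the kernel asserts):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <map>

    enum State { State_Free, State_Normal };
    struct Info { std::size_t size; State state; };

    /* blocks: start address -> info; contiguous, non-overlapping, covering the range. */
    std::size_t CountMappedBytes(const std::map<std::uintptr_t, Info> &blocks,
                                 std::uintptr_t address, std::size_t size) {
        const std::uintptr_t last = address + size - 1;
        std::size_t mapped = 0;
        auto it = std::prev(blocks.upper_bound(address));   /* block containing address */
        for (; it != blocks.end() && it->first <= last; ++it) {
            if (it->second.state == State_Free) { continue; }
            const std::uintptr_t lo = std::max(it->first, address);
            const std::uintptr_t hi = std::min(it->first + it->second.size - 1, last);
            mapped += hi - lo + 1;                           /* overlap with the query */
        }
        return mapped;
    }

/* Check that the iterator is valid.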
*/ - MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + MESOSPHERE_ASSERT(it != m_memory_block_manager.end()); /* Get the memory info. */ const KMemoryInfo info = it->GetMemoryInfo(); @@ -4205,11 +4208,11 @@ namespace ams::kern { } /* Release the memory resource. */ - this->mapped_physical_memory_size -= mapped_size; + m_mapped_physical_memory_size -= mapped_size; GetCurrentProcess().ReleaseResource(ams::svc::LimitableResource_PhysicalMemoryMax, mapped_size); /* Update memory blocks. */ - this->memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None); /* We succeeded. */ remap_guard.Cancel(); @@ -4224,7 +4227,7 @@ namespace ams::kern { auto reserve_guard = SCOPE_GUARD { Kernel::GetUnsafeMemory().Release(size); }; /* Create a page group for the new memory. */ - KPageGroup pg(this->block_info_manager); + KPageGroup pg(m_block_info_manager); /* Allocate the new memory. */ const size_t num_pages = size / PageSize; @@ -4235,20 +4238,20 @@ namespace ams::kern { /* Clear the new memory. */ for (const auto &block : pg) { - std::memset(GetVoidPointer(block.GetAddress()), this->heap_fill_value, block.GetSize()); + std::memset(GetVoidPointer(block.GetAddress()), m_heap_fill_value, block.GetSize()); } /* Map the new memory. */ { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check the memory state. */ size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -4259,10 +4262,10 @@ namespace ams::kern { R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, OperationType_MapGroup, false)); /* Apply the memory block update. */ - this->memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None); /* Update our mapped unsafe size. */ - this->mapped_unsafe_physical_memory += size; + m_mapped_unsafe_physical_memory += size; /* We succeeded. */ reserve_guard.Cancel(); @@ -4272,17 +4275,17 @@ namespace ams::kern { Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { /* Lock the table. */ - KScopedLightLock lk(this->general_lock); + KScopedLightLock lk(m_general_lock); /* Check whether we can unmap this much unsafe physical memory. 
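*/

The reserve_guard/remap_guard objects and their Cancel() calls follow the scope-guard idiom: the cleanup runs on every early-exit path and is cancelled once the operation commits. A minimal analogue of SCOPE_GUARD (Reserve/Release are assumed stand-ins for the resource-limit hooks, not the kernel's API):

    #include <cstddef>
    #include <utility>

    template<typename F>
    class ScopeGuard {
      private:
        F m_f;
        bool m_active;
      public:
        explicit ScopeGuard(F f) : m_f(std::move(f)), m_active(true) { }
        ~ScopeGuard() { if (m_active) { m_f(); } }
        void Cancel() { m_active = false; }
        ScopeGuard(const ScopeGuard &) = delete;
        ScopeGuard &operator=(const ScopeGuard &) = delete;
    };

    bool Reserve(std::size_t) { return true; }   /* assumed resource-limit hooks */
    void Release(std::size_t) { }

    bool MapSomething(std::size_t size) {
        if (!Reserve(size)) { return false; }
        ScopeGuard guard([size] { Release(size); });  /* undo on any early return */

        /* ... steps that may fail and return false, releasing automatically ... */

        guard.Cancel();                               /* success: keep the reservation */
        return true;
    }

/* Check whether we can unmap this much unsafe physical memory.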
*/ - R_UNLESS(size <= this->mapped_unsafe_physical_memory, svc::ResultInvalidCurrentMemory()); + R_UNLESS(size <= m_mapped_unsafe_physical_memory, svc::ResultInvalidCurrentMemory()); /* Check the memory state. */ size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None)); /* Create an update allocator. */ - KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + KMemoryBlockManagerUpdateAllocator allocator(m_memory_block_slab_manager); R_TRY(allocator.Initialize(num_allocator_blocks)); /* We're going to perform an update, so create a helper. */ @@ -4294,13 +4297,13 @@ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false)); /* Apply the memory block update. */ - this->memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal); /* Release the unsafe memory from the limit. */ Kernel::GetUnsafeMemory().Release(size); /* Update our mapped unsafe size. */ - this->mapped_unsafe_physical_memory -= size; + m_mapped_unsafe_physical_memory -= size; return ResultSuccess(); } diff --git a/libraries/libmesosphere/source/kern_k_port.cpp b/libraries/libmesosphere/source/kern_k_port.cpp index a343a1887..af53eed2a 100644 --- a/libraries/libmesosphere/source/kern_k_port.cpp +++ b/libraries/libmesosphere/source/kern_k_port.cpp @@ -22,15 +22,15 @@ namespace ams::kern { this->Open(); /* Create and initialize our server/client pair. */ - KAutoObject::Create(std::addressof(this->server)); - KAutoObject::Create(std::addressof(this->client)); - this->server.Initialize(this); - this->client.Initialize(this, max_sessions); + KAutoObject::Create(std::addressof(m_server)); + KAutoObject::Create(std::addressof(m_client)); + m_server.Initialize(this); + m_client.Initialize(this, max_sessions); /* Set our member variables. 
*/ - this->is_light = is_light; - this->name = name; - this->state = State::Normal; + m_is_light = is_light; + m_name = name; + m_state = State::Normal; } void KPort::OnClientClosed() { @@ -38,8 +38,8 @@ namespace ams::kern { KScopedSchedulerLock sl; - if (this->state == State::Normal) { - this->state = State::ClientClosed; + if (m_state == State::Normal) { + m_state = State::ClientClosed; } } @@ -48,26 +48,26 @@ namespace ams::kern { KScopedSchedulerLock sl; - if (this->state == State::Normal) { - this->state = State::ServerClosed; + if (m_state == State::Normal) { + m_state = State::ServerClosed; } } Result KPort::EnqueueSession(KServerSession *session) { KScopedSchedulerLock sl; - R_UNLESS(this->state == State::Normal, svc::ResultPortClosed()); + R_UNLESS(m_state == State::Normal, svc::ResultPortClosed()); - this->server.EnqueueSession(session); + m_server.EnqueueSession(session); return ResultSuccess(); } Result KPort::EnqueueSession(KLightServerSession *session) { KScopedSchedulerLock sl; - R_UNLESS(this->state == State::Normal, svc::ResultPortClosed()); + R_UNLESS(m_state == State::Normal, svc::ResultPortClosed()); - this->server.EnqueueSession(session); + m_server.EnqueueSession(session); return ResultSuccess(); } diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp index a562b3ee3..9b888f56e 100644 --- a/libraries/libmesosphere/source/kern_k_process.cpp +++ b/libraries/libmesosphere/source/kern_k_process.cpp @@ -79,43 +79,43 @@ namespace ams::kern { void KProcess::Finalize() { /* Delete the process local region. */ - this->DeleteThreadLocalRegion(this->plr_address); + this->DeleteThreadLocalRegion(m_plr_address); /* Get the used memory size. */ const size_t used_memory_size = this->GetUsedUserPhysicalMemorySize(); /* Finalize the page table. */ - this->page_table.Finalize(); + m_page_table.Finalize(); /* Free the system resource. */ - if (this->system_resource_address != Null<KVirtualAddress>) { + if (m_system_resource_address != Null<KVirtualAddress>) { /* Check that we have no outstanding allocations. */ - MESOSPHERE_ABORT_UNLESS(this->memory_block_slab_manager.GetUsed() == 0); - MESOSPHERE_ABORT_UNLESS(this->block_info_manager.GetUsed() == 0); - MESOSPHERE_ABORT_UNLESS(this->page_table_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(m_block_info_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(m_page_table_manager.GetUsed() == 0); /* Free the memory. */ - KSystemControl::FreeSecureMemory(this->system_resource_address, this->system_resource_num_pages * PageSize, this->memory_pool); + KSystemControl::FreeSecureMemory(m_system_resource_address, m_system_resource_num_pages * PageSize, m_memory_pool); /* Clear our tracking variables. */ - this->system_resource_address = Null<KVirtualAddress>; - this->system_resource_num_pages = 0; + m_system_resource_address = Null<KVirtualAddress>; + m_system_resource_num_pages = 0; /* Finalize optimized memory. If memory wasn't optimized, this is a no-op. */ - Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), this->memory_pool); + Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool); } /* Release memory to the resource limit. 
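*/

The KPort methods above all take the scheduler lock and only ever transition out of State::Normal once. A compact stand-alone model of that state machine (std::mutex stands in for the scheduler lock; session handling elided):

    #include <mutex>

    class Port {
      public:
        enum class State { Invalid, Normal, ClientClosed, ServerClosed };
      private:
        std::mutex m_lock;   /* stands in for the scheduler lock */
        State m_state = State::Invalid;
      public:
        void Initialize() {
            std::scoped_lock sl(m_lock);
            m_state = State::Normal;
        }
        void OnClientClosed() {
            std::scoped_lock sl(m_lock);
            if (m_state == State::Normal) { m_state = State::ClientClosed; }
        }
        void OnServerClosed() {
            std::scoped_lock sl(m_lock);
            if (m_state == State::Normal) { m_state = State::ServerClosed; }
        }
        bool EnqueueSession() {  /* session argument elided */
            std::scoped_lock sl(m_lock);
            if (m_state != State::Normal) { return false; }  /* svc::ResultPortClosed() */
            return true;
        }
    };

/* Release memory to the resource limit.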
*/ - if (this->resource_limit != nullptr) { - MESOSPHERE_ABORT_UNLESS(used_memory_size >= this->memory_release_hint); - this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - this->memory_release_hint); - this->resource_limit->Close(); + if (m_resource_limit != nullptr) { + MESOSPHERE_ABORT_UNLESS(used_memory_size >= m_memory_release_hint); + m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - m_memory_release_hint); + m_resource_limit->Close(); } /* Free all shared memory infos. */ { - auto it = this->shared_memory_list.begin(); - while (it != this->shared_memory_list.end()) { + auto it = m_shared_memory_list.begin(); + while (it != m_shared_memory_list.end()) { KSharedMemoryInfo *info = std::addressof(*it); KSharedMemory *shmem = info->GetSharedMemory(); @@ -124,28 +124,28 @@ namespace ams::kern { } shmem->Close(); - it = this->shared_memory_list.erase(it); + it = m_shared_memory_list.erase(it); KSharedMemoryInfo::Free(info); } } /* Close all references to our betas. */ { - auto it = this->beta_list.begin(); - while (it != this->beta_list.end()) { + auto it = m_beta_list.begin(); + while (it != m_beta_list.end()) { KBeta *beta = std::addressof(*it); - it = this->beta_list.erase(it); + it = m_beta_list.erase(it); beta->Close(); } } /* Our thread local page list must be empty at this point. */ - MESOSPHERE_ABORT_UNLESS(this->partially_used_tlp_tree.empty()); - MESOSPHERE_ABORT_UNLESS(this->fully_used_tlp_tree.empty()); + MESOSPHERE_ABORT_UNLESS(m_partially_used_tlp_tree.empty()); + MESOSPHERE_ABORT_UNLESS(m_fully_used_tlp_tree.empty()); /* Log that we finalized for debug. */ - MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", this->process_id, this->name); + MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", m_process_id, m_name); /* Perform inherited finalization. */ KAutoObjectWithSlabHeapAndContainer::Finalize(); @@ -153,40 +153,40 @@ namespace ams::kern { Result KProcess::Initialize(const ams::svc::CreateProcessParameter ¶ms) { /* Validate that the intended kernel version is high enough for us to support. */ - R_UNLESS(this->capabilities.GetIntendedKernelVersion() >= ams::svc::RequiredKernelVersion, svc::ResultInvalidCombination()); + R_UNLESS(m_capabilities.GetIntendedKernelVersion() >= ams::svc::RequiredKernelVersion, svc::ResultInvalidCombination()); /* Validate that the intended kernel version isn't too high for us to support. */ - R_UNLESS(this->capabilities.GetIntendedKernelVersion() <= ams::svc::SupportedKernelVersion, svc::ResultInvalidCombination()); + R_UNLESS(m_capabilities.GetIntendedKernelVersion() <= ams::svc::SupportedKernelVersion, svc::ResultInvalidCombination()); /* Create and clear the process local region. */ - R_TRY(this->CreateThreadLocalRegion(std::addressof(this->plr_address))); - this->plr_heap_address = this->GetThreadLocalRegionPointer(this->plr_address); - std::memset(this->plr_heap_address, 0, ams::svc::ThreadLocalRegionSize); + R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address))); + m_plr_heap_address = this->GetThreadLocalRegionPointer(m_plr_address); + std::memset(m_plr_heap_address, 0, ams::svc::ThreadLocalRegionSize); /* Copy in the name from parameters. 
*/ - static_assert(sizeof(params.name) < sizeof(this->name)); - std::memcpy(this->name, params.name, sizeof(params.name)); - this->name[sizeof(params.name)] = 0; + static_assert(sizeof(params.name) < sizeof(m_name)); + std::memcpy(m_name, params.name, sizeof(params.name)); + m_name[sizeof(params.name)] = 0; /* Set misc fields. */ - this->state = State_Created; - this->main_thread_stack_size = 0; - this->creation_time = KHardwareTimer::GetTick(); - this->used_kernel_memory_size = 0; - this->ideal_core_id = 0; - this->flags = params.flags; - this->version = params.version; - this->program_id = params.program_id; - this->code_address = params.code_address; - this->code_size = params.code_num_pages * PageSize; - this->is_application = (params.flags & ams::svc::CreateProcessFlag_IsApplication); - this->is_jit_debug = false; + m_state = State_Created; + m_main_thread_stack_size = 0; + m_creation_time = KHardwareTimer::GetTick(); + m_used_kernel_memory_size = 0; + m_ideal_core_id = 0; + m_flags = params.flags; + m_version = params.version; + m_program_id = params.program_id; + m_code_address = params.code_address; + m_code_size = params.code_num_pages * PageSize; + m_is_application = (params.flags & ams::svc::CreateProcessFlag_IsApplication); + m_is_jit_debug = false; /* Set thread fields. */ for (size_t i = 0; i < cpu::NumCores; i++) { - this->running_threads[i] = nullptr; - this->running_thread_idle_counts[i] = 0; - this->pinned_threads[i] = nullptr; + m_running_threads[i] = nullptr; + m_running_thread_idle_counts[i] = 0; + m_pinned_threads[i] = nullptr; } /* Set max memory based on address space type. */ @@ -194,36 +194,36 @@ namespace ams::kern { case ams::svc::CreateProcessFlag_AddressSpace32Bit: case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated: case ams::svc::CreateProcessFlag_AddressSpace64Bit: - this->max_process_memory = this->page_table.GetHeapRegionSize(); + m_max_process_memory = m_page_table.GetHeapRegionSize(); break; case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias: - this->max_process_memory = this->page_table.GetHeapRegionSize() + this->page_table.GetAliasRegionSize(); + m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize(); break; MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } /* Generate random entropy. */ - KSystemControl::GenerateRandomBytes(this->entropy, sizeof(this->entropy)); + KSystemControl::GenerateRandomBytes(m_entropy, sizeof(m_entropy)); /* Clear remaining fields. */ - this->num_threads = 0; - this->peak_num_threads = 0; - this->num_created_threads = 0; - this->num_process_switches = 0; - this->num_thread_switches = 0; - this->num_fpu_switches = 0; - this->num_supervisor_calls = 0; - this->num_ipc_messages = 0; + m_num_threads = 0; + m_peak_num_threads = 0; + m_num_created_threads = 0; + m_num_process_switches = 0; + m_num_thread_switches = 0; + m_num_fpu_switches = 0; + m_num_supervisor_calls = 0; + m_num_ipc_messages = 0; - this->is_signaled = false; - this->attached_object = nullptr; - this->exception_thread = nullptr; - this->is_suspended = false; - this->memory_release_hint = 0; - this->schedule_count = 0; + m_is_signaled = false; + m_attached_object = nullptr; + m_exception_thread = nullptr; + m_is_suspended = false; + m_memory_release_hint = 0; + m_schedule_count = 0; /* We're initialized! 
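*/

The name copy earlier in this hunk relies on a compile-time size invariant rather than a runtime check. A condensed sketch of the idiom, with illustrative array sizes (the real field widths live in the svc headers):

#include <cstring>

struct Params { char name[12]; };

struct Process {
    char name[13]; /* One byte larger than the source, by contract. */

    void SetName(const Params &params) {
        /* The destination is strictly larger, so the terminator index is in bounds. */
        static_assert(sizeof(params.name) < sizeof(name));
        std::memcpy(name, params.name, sizeof(params.name));
        name[sizeof(params.name)] = 0; /* Always NUL-terminated. */
    }
};
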
- this->is_initialized = true; + m_is_initialized = true; return ResultSuccess(); } @@ -234,10 +234,10 @@ namespace ams::kern { MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast<size_t>(params.code_num_pages)); /* Set members. */ - this->memory_pool = pool; - this->resource_limit = res_limit; - this->system_resource_address = Null; - this->system_resource_num_pages = 0; + m_memory_pool = pool; + m_resource_limit = res_limit; + m_system_resource_address = Null; + m_system_resource_num_pages = 0; /* Setup page table. */ /* NOTE: Nintendo passes process ID despite not having set it yet. */ @@ -250,29 +250,29 @@ namespace ams::kern { auto *mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager()); auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager()); auto *pt_manager = std::addressof(Kernel::GetPageTableManager()); - R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager)); + R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager)); } - auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); }; + auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); }; /* Ensure we can insert the code region. */ - R_UNLESS(this->page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion()); + R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion()); /* Map the code region. */ - R_TRY(this->page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead)); + R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead)); /* Initialize capabilities. */ - R_TRY(this->capabilities.Initialize(caps, num_caps, std::addressof(this->page_table))); + R_TRY(m_capabilities.Initialize(caps, num_caps, std::addressof(m_page_table))); /* Initialize the process id. */ - this->process_id = g_initial_process_id++; - MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= this->process_id); - MESOSPHERE_ABORT_UNLESS(this->process_id <= InitialProcessIdMax); + m_process_id = g_initial_process_id++; + MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= m_process_id); + MESOSPHERE_ABORT_UNLESS(m_process_id <= InitialProcessIdMax); /* Initialize the rest of the process. */ R_TRY(this->Initialize(params)); /* Open a reference to the resource limit. */ - this->resource_limit->Open(); + m_resource_limit->Open(); /* We succeeded! */ pt_guard.Cancel(); @@ -284,8 +284,8 @@ namespace ams::kern { MESOSPHERE_ASSERT(res_limit != nullptr); /* Set pool and resource limit. */ - this->memory_pool = pool; - this->resource_limit = res_limit; + m_memory_pool = pool; + m_resource_limit = res_limit; /* Get the memory sizes. */ const size_t code_num_pages = params.code_num_pages; @@ -302,27 +302,27 @@ namespace ams::kern { KBlockInfoManager *block_info_manager; KPageTableManager *pt_manager; - this->system_resource_address = Null; - this->system_resource_num_pages = 0; + m_system_resource_address = Null; + m_system_resource_num_pages = 0; if (system_resource_num_pages != 0) { /* Allocate secure memory.
*/ - R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(this->system_resource_address), system_resource_size, pool)); + R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(m_system_resource_address), system_resource_size, pool)); /* Set the number of system resource pages. */ - MESOSPHERE_ASSERT(this->system_resource_address != Null); - this->system_resource_num_pages = system_resource_num_pages; + MESOSPHERE_ASSERT(m_system_resource_address != Null); + m_system_resource_num_pages = system_resource_num_pages; /* Initialize managers. */ const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(system_resource_size), PageSize); - this->dynamic_page_manager.Initialize(this->system_resource_address + rc_size, system_resource_size - rc_size); - this->page_table_manager.Initialize(std::addressof(this->dynamic_page_manager), GetPointer(this->system_resource_address)); - this->memory_block_slab_manager.Initialize(std::addressof(this->dynamic_page_manager)); - this->block_info_manager.Initialize(std::addressof(this->dynamic_page_manager)); + m_dynamic_page_manager.Initialize(m_system_resource_address + rc_size, system_resource_size - rc_size); + m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), GetPointer(m_system_resource_address)); + m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager)); + m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager)); - mem_block_manager = std::addressof(this->memory_block_slab_manager); - block_info_manager = std::addressof(this->block_info_manager); - pt_manager = std::addressof(this->page_table_manager); + mem_block_manager = std::addressof(m_memory_block_slab_manager); + block_info_manager = std::addressof(m_block_info_manager); + pt_manager = std::addressof(m_page_table_manager); } else { const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication); mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager()); @@ -332,18 +332,18 @@ namespace ams::kern { /* Ensure we don't leak any secure memory we allocated. */ auto sys_resource_guard = SCOPE_GUARD { - if (this->system_resource_address != Null) { + if (m_system_resource_address != Null) { /* Check that we have no outstanding allocations. */ - MESOSPHERE_ABORT_UNLESS(this->memory_block_slab_manager.GetUsed() == 0); - MESOSPHERE_ABORT_UNLESS(this->block_info_manager.GetUsed() == 0); - MESOSPHERE_ABORT_UNLESS(this->page_table_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(m_block_info_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(m_page_table_manager.GetUsed() == 0); /* Free the memory. */ - KSystemControl::FreeSecureMemory(this->system_resource_address, system_resource_size, pool); + KSystemControl::FreeSecureMemory(m_system_resource_address, system_resource_size, pool); /* Clear our tracking variables. 
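*/

The manager setup in this hunk carves one contiguous secure-memory block into a page-aligned reference-count area followed by the dynamic page heap. The arithmetic, restated as a tiny self-contained example (sizes are illustrative):

#include <cstddef>

constexpr size_t PageSize = 0x1000;

constexpr size_t AlignUp(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

/* With a 1 MiB resource block whose ref counts need 0x1800 bytes, the
   counts take 0x2000 after alignment and the heap gets the remainder. */
constexpr size_t HeapSize(size_t system_resource_size, size_t ref_count_size) {
    const size_t rc_size = AlignUp(ref_count_size, PageSize);
    return system_resource_size - rc_size;
}
static_assert(HeapSize(0x100000, 0x1800) == 0xFE000);
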
- this->system_resource_address = Null; - this->system_resource_num_pages = 0; + m_system_resource_address = Null; + m_system_resource_num_pages = 0; } }; @@ -354,34 +354,34 @@ namespace ams::kern { const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask); const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0; const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0; - R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager)); + R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager)); } - auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); }; + auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); }; /* Ensure we can insert the code region. */ - R_UNLESS(this->page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion()); + R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion()); /* Map the code region. */ - R_TRY(this->page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped))); + R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped))); /* Initialize capabilities. */ - R_TRY(this->capabilities.Initialize(user_caps, num_caps, std::addressof(this->page_table))); + R_TRY(m_capabilities.Initialize(user_caps, num_caps, std::addressof(m_page_table))); /* Initialize the process id. */ - this->process_id = g_process_id++; - MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= this->process_id); - MESOSPHERE_ABORT_UNLESS(this->process_id <= ProcessIdMax); + m_process_id = g_process_id++; + MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= m_process_id); + MESOSPHERE_ABORT_UNLESS(m_process_id <= ProcessIdMax); /* If we should optimize memory allocations, do so. */ - if (this->system_resource_address != Null && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) { - R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(this->process_id, pool)); + if (m_system_resource_address != Null && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) { + R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(m_process_id, pool)); } /* Initialize the rest of the process. */ R_TRY(this->Initialize(params)); /* Open a reference to the resource limit. */ - this->resource_limit->Open(); + m_resource_limit->Open(); /* We succeeded, so commit our memory reservation and cancel our guards. */ sys_resource_guard.Cancel(); @@ -407,14 +407,14 @@ namespace ams::kern { TerminateChildren(this, GetCurrentThreadPointer()); /* Finalize the handle table. */ - this->handle_table.Finalize(); + m_handle_table.Finalize(); } void KProcess::FinishTermination() { /* Release resource limit hint.
*/ - if (this->resource_limit != nullptr) { - this->memory_release_hint = this->GetUsedUserPhysicalMemorySize(); - this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, this->memory_release_hint); + if (m_resource_limit != nullptr) { + m_memory_release_hint = this->GetUsedUserPhysicalMemorySize(); + m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, m_memory_release_hint); } /* Change state. */ @@ -433,14 +433,14 @@ namespace ams::kern { /* Determine whether we need to start terminating */ bool needs_terminate = false; { - KScopedLightLock lk(this->state_lock); + KScopedLightLock lk(m_state_lock); KScopedSchedulerLock sl; - MESOSPHERE_ASSERT(this->state != State_Created); - MESOSPHERE_ASSERT(this->state != State_CreatedAttached); - MESOSPHERE_ASSERT(this->state != State_Crashed); - MESOSPHERE_ASSERT(this->state != State_Terminated); - if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_DebugBreak) { + MESOSPHERE_ASSERT(m_state != State_Created); + MESOSPHERE_ASSERT(m_state != State_CreatedAttached); + MESOSPHERE_ASSERT(m_state != State_Crashed); + MESOSPHERE_ASSERT(m_state != State_Terminated); + if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_DebugBreak) { this->ChangeState(State_Terminating); needs_terminate = true; } @@ -451,7 +451,7 @@ namespace ams::kern { this->StartTermination(); /* Note for debug that we're exiting the process. */ - MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", this->process_id, this->name); + MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", m_process_id, m_name); /* Register the process as a work task. */ KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this); @@ -468,15 +468,15 @@ namespace ams::kern { /* Determine whether we need to start terminating */ bool needs_terminate = false; { - KScopedLightLock lk(this->state_lock); + KScopedLightLock lk(m_state_lock); /* Check whether we're allowed to terminate. */ - R_UNLESS(this->state != State_Created, svc::ResultInvalidState()); - R_UNLESS(this->state != State_CreatedAttached, svc::ResultInvalidState()); + R_UNLESS(m_state != State_Created, svc::ResultInvalidState()); + R_UNLESS(m_state != State_CreatedAttached, svc::ResultInvalidState()); KScopedSchedulerLock sl; - if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_Crashed || this->state == State_DebugBreak) { + if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) { this->ChangeState(State_Terminating); needs_terminate = true; } @@ -488,7 +488,7 @@ namespace ams::kern { this->StartTermination(); /* Note for debug that we're terminating the process. */ - MESOSPHERE_LOG("KProcess::Terminate() pid=%ld name=%-12s\n", this->process_id, this->name); + MESOSPHERE_LOG("KProcess::Terminate() pid=%ld name=%-12s\n", m_process_id, m_name); /* Call the debug callback. */ KDebug::OnTerminateProcess(this); @@ -502,14 +502,14 @@ namespace ams::kern { Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) { /* Lock ourselves, to prevent concurrent access. */ - KScopedLightLock lk(this->state_lock); + KScopedLightLock lk(m_state_lock); /* Address and size parameters aren't used. */ MESOSPHERE_UNUSED(address, size); /* Try to find an existing info for the memory. 
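*/

The loop that follows is a lookup-or-create over an intrusive list: reuse the tracking info if this shared memory is already mapped, otherwise allocate and append one. The same shape with standard containers and hypothetical stand-in types (not the kernel's KSharedMemoryInfo):

#include <list>

struct SharedMemory { /* ... */ };

struct SharedMemoryInfo {
    SharedMemory *shmem;
    int ref_count;
};

SharedMemoryInfo *OpenInfo(std::list<SharedMemoryInfo> &infos, SharedMemory *shmem) {
    for (auto &info : infos) {
        if (info.shmem == shmem) {
            ++info.ref_count; /* Already tracked: take another reference. */
            return &info;
        }
    }
    infos.push_back(SharedMemoryInfo{shmem, 1}); /* First mapping: start tracking. */
    return &infos.back();
}
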
*/ KSharedMemoryInfo *info = nullptr; - for (auto it = this->shared_memory_list.begin(); it != this->shared_memory_list.end(); ++it) { + for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) { if (it->GetSharedMemory() == shmem) { info = std::addressof(*it); break; @@ -524,7 +524,7 @@ namespace ams::kern { /* Initialize the info and add it to our list. */ info->Initialize(shmem); - this->shared_memory_list.push_back(*info); + m_shared_memory_list.push_back(*info); } /* Open a reference to the shared memory and its info. */ @@ -536,15 +536,15 @@ namespace ams::kern { void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) { /* Lock ourselves, to prevent concurrent access. */ - KScopedLightLock lk(this->state_lock); + KScopedLightLock lk(m_state_lock); /* Address and size parameters aren't used. */ MESOSPHERE_UNUSED(address, size); /* Find an existing info for the memory. */ KSharedMemoryInfo *info = nullptr; - auto it = this->shared_memory_list.begin(); - for (/* ... */; it != this->shared_memory_list.end(); ++it) { + auto it = m_shared_memory_list.begin(); + for (/* ... */; it != m_shared_memory_list.end(); ++it) { if (it->GetSharedMemory() == shmem) { info = std::addressof(*it); break; @@ -554,7 +554,7 @@ namespace ams::kern { /* Close a reference to the info and its memory. */ if (info->Close()) { - this->shared_memory_list.erase(it); + m_shared_memory_list.erase(it); KSharedMemoryInfo::Free(info); } @@ -569,14 +569,14 @@ namespace ams::kern { { KScopedSchedulerLock sl; - if (auto it = this->partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) { + if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) { tlr = it->Reserve(); MESOSPHERE_ABORT_UNLESS(tlr != Null); if (it->IsAllUsed()) { tlp = std::addressof(*it); - this->partially_used_tlp_tree.erase(it); - this->fully_used_tlp_tree.insert(*tlp); + m_partially_used_tlp_tree.erase(it); + m_fully_used_tlp_tree.insert(*tlp); } *out = tlr; @@ -600,9 +600,9 @@ namespace ams::kern { { KScopedSchedulerLock sl; if (tlp->IsAllUsed()) { - this->fully_used_tlp_tree.insert(*tlp); + m_fully_used_tlp_tree.insert(*tlp); } else { - this->partially_used_tlp_tree.insert(*tlp); + m_partially_used_tlp_tree.insert(*tlp); } } @@ -620,22 +620,22 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Try to find the page in the partially used list. */ - auto it = this->partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); - if (it == this->partially_used_tlp_tree.end()) { + auto it = m_partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); + if (it == m_partially_used_tlp_tree.end()) { /* If we don't find it, it has to be in the fully used list. */ - it = this->fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); - R_UNLESS(it != this->fully_used_tlp_tree.end(), svc::ResultInvalidAddress()); + it = m_fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); + R_UNLESS(it != m_fully_used_tlp_tree.end(), svc::ResultInvalidAddress()); /* Release the region. */ it->Release(addr); /* Move the page out of the fully used list. 
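*/

For context on the two trees being juggled here: thread-local pages with free slots live in the partially-used tree, exhausted ones in the fully-used tree, so allocation never scans full pages. A compressed sketch of that bookkeeping, with stand-in types and an assumed slot count:

#include <set>

struct Page {
    static constexpr int SlotsPerPage = 8; /* Assumed capacity, for illustration. */
    int used = 0;
    bool IsAllUsed() const { return used == SlotsPerPage; }
    int Reserve() { return used++; } /* Returns the reserved slot index. */
};

int AllocateSlot(std::set<Page *> &partially_used, std::set<Page *> &fully_used) {
    if (partially_used.empty()) {
        return -1; /* Caller must allocate a fresh page, as the kernel does. */
    }
    Page *page = *partially_used.begin();
    const int slot = page->Reserve();
    if (page->IsAllUsed()) { /* The page just filled: migrate it. */
        partially_used.erase(page);
        fully_used.insert(page);
    }
    return slot;
}
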
KThreadLocalPage *tlp = std::addressof(*it); - this->fully_used_tlp_tree.erase(it); + m_fully_used_tlp_tree.erase(it); if (tlp->IsAllFree()) { page_to_free = tlp; } else { - this->partially_used_tlp_tree.insert(*tlp); + m_partially_used_tlp_tree.insert(*tlp); } } else { /* Release the region. */ @@ -644,7 +644,7 @@ namespace ams::kern { /* Handle the all-free case. */ KThreadLocalPage *tlp = std::addressof(*it); if (tlp->IsAllFree()) { - this->partially_used_tlp_tree.erase(it); + m_partially_used_tlp_tree.erase(it); page_to_free = tlp; } } @@ -664,9 +664,9 @@ namespace ams::kern { KThreadLocalPage *tlp = nullptr; { KScopedSchedulerLock sl; - if (auto it = this->partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != this->partially_used_tlp_tree.end()) { + if (auto it = m_partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != m_partially_used_tlp_tree.end()) { tlp = std::addressof(*it); - } else if (auto it = this->fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != this->fully_used_tlp_tree.end()) { + } else if (auto it = m_fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != m_fully_used_tlp_tree.end()) { tlp = std::addressof(*it); } else { return nullptr; @@ -704,18 +704,18 @@ namespace ams::kern { } void KProcess::IncrementThreadCount() { - MESOSPHERE_ASSERT(this->num_threads >= 0); - ++this->num_created_threads; + MESOSPHERE_ASSERT(m_num_threads >= 0); + ++m_num_created_threads; - if (const auto count = ++this->num_threads; count > this->peak_num_threads) { - this->peak_num_threads = count; + if (const auto count = ++m_num_threads; count > m_peak_num_threads) { + m_peak_num_threads = count; } } void KProcess::DecrementThreadCount() { - MESOSPHERE_ASSERT(this->num_threads > 0); + MESOSPHERE_ASSERT(m_num_threads > 0); - if (const auto count = --this->num_threads; count == 0) { + if (const auto count = --m_num_threads; count == 0) { this->Terminate(); } } @@ -726,8 +726,8 @@ namespace ams::kern { MESOSPHERE_ASSERT(this == cur_thread->GetOwnerProcess()); /* Try to claim the exception thread. */ - if (this->exception_thread != cur_thread) { - const uintptr_t address_key = reinterpret_cast<uintptr_t>(std::addressof(this->exception_thread)); + if (m_exception_thread != cur_thread) { + const uintptr_t address_key = reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)); while (true) { { KScopedSchedulerLock sl; @@ -738,14 +738,14 @@ namespace ams::kern { } /* If we have no exception thread, we succeeded. */ - if (this->exception_thread == nullptr) { - this->exception_thread = cur_thread; + if (m_exception_thread == nullptr) { + m_exception_thread = cur_thread; return true; } /* Otherwise, wait for us to not have an exception thread. */ cur_thread->SetAddressKey(address_key); - this->exception_thread->AddWaiter(cur_thread); + m_exception_thread->AddWaiter(cur_thread); if (cur_thread->GetState() == KThread::ThreadState_Runnable) { cur_thread->SetState(KThread::ThreadState_Waiting); } else { @@ -774,12 +774,12 @@ namespace ams::kern { bool KProcess::ReleaseUserException(KThread *thread) { KScopedSchedulerLock sl; - if (this->exception_thread == thread) { - this->exception_thread = nullptr; + if (m_exception_thread == thread) { + m_exception_thread = nullptr; /* Remove waiter thread. */
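The claim loop in EnterUserException above amounts to: take the slot if it is free, otherwise queue behind the current owner and sleep until released. Reduced to portable primitives (a sketch only; the kernel uses its scheduler lock and waiter lists, not a condition variable):

#include <condition_variable>
#include <mutex>

class ExceptionSlot {
    std::mutex m_mutex;
    std::condition_variable m_cv;
    const void *m_owner = nullptr;
public:
    void Claim(const void *self) {
        std::unique_lock lk(m_mutex);
        m_cv.wait(lk, [&] { return m_owner == nullptr; }); /* Wait until unowned. */
        m_owner = self;
    }
    bool Release(const void *self) {
        std::lock_guard lk(m_mutex);
        if (m_owner != self) {
            return false;
        }
        m_owner = nullptr;
        m_cv.notify_one(); /* Wake one waiter, like waking the next waiter thread. */
        return true;
    }
};
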
s32 num_waiters; - KThread *next = thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(this->exception_thread))); + KThread *next = thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread))); if (next != nullptr) { if (next->GetState() == KThread::ThreadState_Waiting) { next->SetState(KThread::ThreadState_Runnable); @@ -795,30 +795,30 @@ namespace ams::kern { } void KProcess::RegisterThread(KThread *thread) { - KScopedLightLock lk(this->list_lock); + KScopedLightLock lk(m_list_lock); - this->thread_list.push_back(*thread); + m_thread_list.push_back(*thread); } void KProcess::UnregisterThread(KThread *thread) { - KScopedLightLock lk(this->list_lock); + KScopedLightLock lk(m_list_lock); - this->thread_list.erase(this->thread_list.iterator_to(*thread)); + m_thread_list.erase(m_thread_list.iterator_to(*thread)); } size_t KProcess::GetUsedUserPhysicalMemorySize() const { - const size_t norm_size = this->page_table.GetNormalMemorySize(); - const size_t other_size = this->code_size + this->main_thread_stack_size; - const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool); + const size_t norm_size = m_page_table.GetNormalMemorySize(); + const size_t other_size = m_code_size + m_main_thread_stack_size; + const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(m_system_resource_num_pages * PageSize, m_memory_pool); return norm_size + other_size + sec_size; } size_t KProcess::GetTotalUserPhysicalMemorySize() const { /* Get the amount of free and used size. */ - const size_t free_size = this->resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax); + const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax); const size_t used_size = this->GetUsedNonSystemUserPhysicalMemorySize(); - const size_t max_size = this->max_process_memory; + const size_t max_size = m_max_process_memory; if (used_size + free_size > max_size) { return max_size; @@ -828,18 +828,18 @@ namespace ams::kern { } size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const { - const size_t norm_size = this->page_table.GetNormalMemorySize(); - const size_t other_size = this->code_size + this->main_thread_stack_size; + const size_t norm_size = m_page_table.GetNormalMemorySize(); + const size_t other_size = m_code_size + m_main_thread_stack_size; return norm_size + other_size; } size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const { /* Get the amount of free and used size. */ - const size_t free_size = this->resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax); + const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax); const size_t used_size = this->GetUsedUserPhysicalMemorySize(); - const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool); - const size_t max_size = this->max_process_memory; + const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(m_system_resource_num_pages * PageSize, m_memory_pool); + const size_t max_size = m_max_process_memory; if (used_size + free_size > max_size) { return max_size - sec_size; @@ -852,10 +852,10 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Lock ourselves, to prevent concurrent access. */
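The size queries above all reduce to one clamp: a process may report at most its maximum memory, even when the resource limit still has headroom. Stated directly (an illustrative, standalone form of the if/else above):

#include <algorithm>
#include <cstddef>

size_t TotalUserPhysicalMemorySize(size_t used_size, size_t free_size, size_t max_size) {
    /* Equivalent to: if (used + free > max) return max; else return used + free. */
    return std::min(used_size + free_size, max_size);
}
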
- KScopedLightLock lk(this->state_lock); + KScopedLightLock lk(m_state_lock); /* Validate that we're in a state where we can initialize. */ - const auto state = this->state; + const auto state = m_state; R_UNLESS(state == State_Created || state == State_CreatedAttached, svc::ResultInvalidState()); /* Place a tentative reservation of a thread for this process. */ @@ -863,12 +863,12 @@ namespace ams::kern { R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached()); /* Ensure that we haven't already allocated stack. */ - MESOSPHERE_ABORT_UNLESS(this->main_thread_stack_size == 0); + MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0); /* Ensure that we're allocating a valid stack. */ stack_size = util::AlignUp(stack_size, PageSize); - R_UNLESS(stack_size + this->code_size <= this->max_process_memory, svc::ResultOutOfMemory()); - R_UNLESS(stack_size + this->code_size >= this->code_size, svc::ResultOutOfMemory()); + R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory()); + R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory()); /* Place a tentative reservation of memory for our new stack. */ KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, stack_size); @@ -878,26 +878,26 @@ namespace ams::kern { KProcessAddress stack_top = Null; if (stack_size) { KProcessAddress stack_bottom; - R_TRY(this->page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite)); + R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite)); stack_top = stack_bottom + stack_size; - this->main_thread_stack_size = stack_size; + m_main_thread_stack_size = stack_size; } /* Ensure our stack is safe to clean up on exit. */ auto stack_guard = SCOPE_GUARD { - if (this->main_thread_stack_size) { - MESOSPHERE_R_ABORT_UNLESS(this->page_table.UnmapPages(stack_top - this->main_thread_stack_size, this->main_thread_stack_size / PageSize, KMemoryState_Stack)); - this->main_thread_stack_size = 0; + if (m_main_thread_stack_size) { + MESOSPHERE_R_ABORT_UNLESS(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size, m_main_thread_stack_size / PageSize, KMemoryState_Stack)); + m_main_thread_stack_size = 0; } }; /* Set our maximum heap size. */ - R_TRY(this->page_table.SetMaxHeapSize(this->max_process_memory - (this->main_thread_stack_size + this->code_size))); + R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory - (m_main_thread_stack_size + m_code_size))); /* Initialize our handle table. */ - R_TRY(this->handle_table.Initialize(this->capabilities.GetHandleTableSize())); - auto ht_guard = SCOPE_GUARD { this->handle_table.Finalize(); }; + R_TRY(m_handle_table.Initialize(m_capabilities.GetHandleTableSize())); + auto ht_guard = SCOPE_GUARD { m_handle_table.Finalize(); }; /* Create a new thread for the process. */ KThread *main_thread = KThread::Create(); @@ -905,7 +905,7 @@ namespace ams::kern { auto thread_guard = SCOPE_GUARD { main_thread->Close(); }; /* Initialize the thread. */ - R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, this->ideal_core_id, this)); + R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, m_ideal_core_id, this)); /* Register the thread, and commit our reservation. */
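SetupMainThread leans on the SCOPE_GUARD/Cancel idiom: every partially-completed step arms a rollback that runs on early return and is cancelled on success. A minimal standalone guard in the same spirit (the kernel uses its own macro, not this type):

#include <utility>

template<typename F>
class ScopeGuard {
    F m_f;
    bool m_active = true;
public:
    explicit ScopeGuard(F f) : m_f(std::move(f)) {}
    ~ScopeGuard() { if (m_active) { m_f(); } } /* Roll back unless cancelled. */
    void Cancel() { m_active = false; }        /* Commit: skip the rollback. */
    ScopeGuard(const ScopeGuard &) = delete;
    ScopeGuard &operator=(const ScopeGuard &) = delete;
};

/* Usage mirroring the hunk above:
   auto ht_guard = ScopeGuard([&] { handle_table.Finalize(); });
   ...further initialization that may fail...
   ht_guard.Cancel(); // success: keep the handle table
*/
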
KThread::Register(main_thread); @@ -913,7 +913,7 @@ namespace ams::kern { /* Add the thread to our handle table. */ ams::svc::Handle thread_handle; - R_TRY(this->handle_table.Add(std::addressof(thread_handle), main_thread)); + R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread)); /* Set the thread arguments. */ main_thread->GetContext().SetArguments(0, thread_handle); @@ -933,7 +933,7 @@ namespace ams::kern { mem_reservation.Commit(); /* Note for debug that we're running a new process. */ - MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", this->process_id, this->name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore()); + MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", m_process_id, m_name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore()); return ResultSuccess(); } @@ -942,32 +942,32 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); /* Lock the process and the scheduler. */ - KScopedLightLock lk(this->state_lock); + KScopedLightLock lk(m_state_lock); KScopedSchedulerLock sl; /* Validate that we're in a state that we can reset. */ - R_UNLESS(this->state != State_Terminated, svc::ResultInvalidState()); - R_UNLESS(this->is_signaled, svc::ResultInvalidState()); + R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState()); + R_UNLESS(m_is_signaled, svc::ResultInvalidState()); /* Clear signaled. */ - this->is_signaled = false; + m_is_signaled = false; return ResultSuccess(); } Result KProcess::SetActivity(ams::svc::ProcessActivity activity) { /* Lock ourselves and the scheduler. */ - KScopedLightLock lk(this->state_lock); - KScopedLightLock list_lk(this->list_lock); + KScopedLightLock lk(m_state_lock); + KScopedLightLock list_lk(m_list_lock); KScopedSchedulerLock sl; /* Validate our state. */ - R_UNLESS(this->state != State_Terminating, svc::ResultInvalidState()); - R_UNLESS(this->state != State_Terminated, svc::ResultInvalidState()); + R_UNLESS(m_state != State_Terminating, svc::ResultInvalidState()); + R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState()); /* Either pause or resume. */ if (activity == ams::svc::ProcessActivity_Paused) { /* Verify that we're not suspended. */ - R_UNLESS(!this->is_suspended, svc::ResultInvalidState()); + R_UNLESS(!m_is_suspended, svc::ResultInvalidState()); /* Suspend all threads. */ auto end = this->GetThreadList().end(); @@ -981,7 +981,7 @@ namespace ams::kern { MESOSPHERE_ASSERT(activity == ams::svc::ProcessActivity_Runnable); /* Verify that we're suspended. */ - R_UNLESS(this->is_suspended, svc::ResultInvalidState()); + R_UNLESS(m_is_suspended, svc::ResultInvalidState()); /* Resume all threads. */ auto end = this->GetThreadList().end(); @@ -1028,7 +1028,7 @@ namespace ams::kern { Result KProcess::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) { /* Lock the list. */ - KScopedLightLock lk(this->list_lock); + KScopedLightLock lk(m_list_lock); /* Iterate over the list. */ s32 count = 0; @@ -1059,17 +1059,17 @@ namespace ams::kern { MESOSPHERE_ASSERT(debug_object != nullptr); /* Cache our state to return it to the debug object. */ - const auto old_state = this->state; + const auto old_state = m_state; /* Set the object.
*/ - this->attached_object = debug_object; + m_attached_object = debug_object; /* Check that our state is valid for attach. */ - MESOSPHERE_ASSERT(this->state == State_Created || this->state == State_Running || this->state == State_Crashed); + MESOSPHERE_ASSERT(m_state == State_Created || m_state == State_Running || m_state == State_Crashed); /* Update our state. */ - if (this->state != State_DebugBreak) { - if (this->state == State_Created) { + if (m_state != State_DebugBreak) { + if (m_state == State_Created) { this->ChangeState(State_CreatedAttached); } else { this->ChangeState(State_DebugBreak); @@ -1084,15 +1084,15 @@ namespace ams::kern { MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); /* Clear the attached object. */ - this->attached_object = nullptr; + m_attached_object = nullptr; /* Validate that the process is in an attached state. */ - MESOSPHERE_ASSERT(this->state == State_CreatedAttached || this->state == State_RunningAttached || this->state == State_DebugBreak || this->state == State_Terminating || this->state == State_Terminated); + MESOSPHERE_ASSERT(m_state == State_CreatedAttached || m_state == State_RunningAttached || m_state == State_DebugBreak || m_state == State_Terminating || m_state == State_Terminated); /* Change the state appropriately. */ - if (this->state == State_CreatedAttached) { + if (m_state == State_CreatedAttached) { this->ChangeState(State_Created); - } else if (this->state == State_RunningAttached || this->state == State_DebugBreak) { + } else if (m_state == State_RunningAttached || m_state == State_DebugBreak) { /* Disallow transition back to created from running. */ if (old_state == State_Created) { old_state = State_Running; @@ -1107,20 +1107,20 @@ namespace ams::kern { MESOSPHERE_ASSERT(this == GetCurrentProcessPointer()); /* If we aren't allowed to enter jit debug, don't. */ - if ((this->flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) { + if ((m_flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) { return false; } /* We're the current process, so we should be some kind of running. */ - MESOSPHERE_ASSERT(this->state != State_Created); - MESOSPHERE_ASSERT(this->state != State_CreatedAttached); - MESOSPHERE_ASSERT(this->state != State_Terminated); + MESOSPHERE_ASSERT(m_state != State_Created); + MESOSPHERE_ASSERT(m_state != State_CreatedAttached); + MESOSPHERE_ASSERT(m_state != State_Terminated); /* Try to enter JIT debug. */ while (true) { /* Lock ourselves and the scheduler. */ - KScopedLightLock lk(this->state_lock); - KScopedLightLock list_lk(this->list_lock); + KScopedLightLock lk(m_state_lock); + KScopedLightLock list_lk(m_list_lock); KScopedSchedulerLock sl; /* If we're attached to a debugger, we're necessarily in debug. */ @@ -1134,12 +1134,12 @@ namespace ams::kern { } /* We're not attached to debugger, so check that. */ - MESOSPHERE_ASSERT(this->state != State_RunningAttached); - MESOSPHERE_ASSERT(this->state != State_DebugBreak); + MESOSPHERE_ASSERT(m_state != State_RunningAttached); + MESOSPHERE_ASSERT(m_state != State_DebugBreak); /* If we're terminating, we can't enter debug. */ - if (this->state != State_Running && this->state != State_Crashed) { - MESOSPHERE_ASSERT(this->state == State_Terminating); + if (m_state != State_Running && m_state != State_Crashed) { + MESOSPHERE_ASSERT(m_state == State_Terminating); return false; } @@ -1160,14 +1160,14 @@ namespace ams::kern { this->ChangeState(State_Crashed); /* Enter jit debug. 
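*/

The attach/detach hunks above encode a small state machine. Restated as a table-style helper (a reading aid with simplified enumerators, not kernel code):

enum class State { Created, CreatedAttached, Running, RunningAttached, DebugBreak, Crashed, Terminating, Terminated };

State OnDebugAttach(State s) {
    /* Created gains its attached flavor; a running or crashed process breaks. */
    return (s == State::Created) ? State::CreatedAttached : State::DebugBreak;
}

State OnDebugDetach(State s, State old_state) {
    if (s == State::CreatedAttached) {
        return State::Created;
    }
    if (s == State::RunningAttached || s == State::DebugBreak) {
        /* Disallow the transition back to Created once the process has run. */
        return (old_state == State::Created) ? State::Running : old_state;
    }
    return s; /* Terminating/Terminated stay as they are. */
}
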
- this->is_jit_debug = true; - this->jit_debug_event_type = event; - this->jit_debug_exception_type = exception; - this->jit_debug_params[0] = param1; - this->jit_debug_params[1] = param2; - this->jit_debug_params[2] = param3; - this->jit_debug_params[3] = param4; - this->jit_debug_thread_id = GetCurrentThread().GetId(); + m_is_jit_debug = true; + m_jit_debug_event_type = event; + m_jit_debug_exception_type = exception; + m_jit_debug_params[0] = param1; + m_jit_debug_params[1] = param2; + m_jit_debug_params[2] = param3; + m_jit_debug_params[3] = param4; + m_jit_debug_thread_id = GetCurrentThread().GetId(); /* Exit our retry loop. */ break; @@ -1177,7 +1177,7 @@ namespace ams::kern { { KScopedSchedulerLock sl; - if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_Crashed || this->state == State_DebugBreak) { + if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) { return true; } } @@ -1189,8 +1189,8 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); - if (this->is_jit_debug) { - return KDebugBase::CreateDebugEvent(this->jit_debug_event_type, this->jit_debug_exception_type, this->jit_debug_params[0], this->jit_debug_params[1], this->jit_debug_params[2], this->jit_debug_params[3], this->jit_debug_thread_id); + if (m_is_jit_debug) { + return KDebugBase::CreateDebugEvent(m_jit_debug_event_type, m_jit_debug_exception_type, m_jit_debug_params[0], m_jit_debug_params[1], m_jit_debug_params[2], m_jit_debug_params[3], m_jit_debug_thread_id); } else { return nullptr; } @@ -1200,7 +1200,7 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); - this->is_jit_debug = false; + m_is_jit_debug = false; } KProcess *KProcess::GetProcessFromId(u64 process_id) { diff --git a/libraries/libmesosphere/source/kern_k_readable_event.cpp b/libraries/libmesosphere/source/kern_k_readable_event.cpp index 6b06dea6a..e3e2878e9 100644 --- a/libraries/libmesosphere/source/kern_k_readable_event.cpp +++ b/libraries/libmesosphere/source/kern_k_readable_event.cpp @@ -21,13 +21,13 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); - return this->is_signaled; + return m_is_signaled; } void KReadableEvent::Destroy() { MESOSPHERE_ASSERT_THIS(); - if (this->parent_event) { - this->parent_event->Close(); + if (m_parent) { + m_parent->Close(); } } @@ -36,8 +36,8 @@ namespace ams::kern { KScopedSchedulerLock lk; - if (!this->is_signaled) { - this->is_signaled = true; + if (!m_is_signaled) { + m_is_signaled = true; this->NotifyAvailable(); } @@ -57,9 +57,9 @@ namespace ams::kern { KScopedSchedulerLock lk; - R_UNLESS(this->is_signaled, svc::ResultInvalidState()); + R_UNLESS(m_is_signaled, svc::ResultInvalidState()); - this->is_signaled = false; + m_is_signaled = false; return ResultSuccess(); } diff --git a/libraries/libmesosphere/source/kern_k_resource_limit.cpp b/libraries/libmesosphere/source/kern_k_resource_limit.cpp index dca3419dc..04cf4873b 100644 --- a/libraries/libmesosphere/source/kern_k_resource_limit.cpp +++ b/libraries/libmesosphere/source/kern_k_resource_limit.cpp @@ -27,12 +27,12 @@ namespace ams::kern { /* This should be unnecessary for us, because our constructor will clear all fields. */ /* The following is analogous to what Nintendo's implementation (no constexpr constructor) would do, though.
*/ /* - this->waiter_count = 0; - for (size_t i = 0; i < util::size(this->limit_values); i++) { - this->limit_values[i] = 0; - this->current_values[i] = 0; - this->current_hints[i] = 0; - this->peak_values[i] = 0; + m_waiter_count = 0; + for (size_t i = 0; i < util::size(m_limit_values); i++) { + m_limit_values[i] = 0; + m_current_values[i] = 0; + m_current_hints[i] = 0; + m_peak_values[i] = 0; } */ } @@ -46,11 +46,11 @@ namespace ams::kern { s64 value; { - KScopedLightLock lk(this->lock); - value = this->limit_values[which]; + KScopedLightLock lk(m_lock); + value = m_limit_values[which]; MESOSPHERE_ASSERT(value >= 0); - MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]); - MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); } return value; @@ -61,11 +61,11 @@ namespace ams::kern { s64 value; { - KScopedLightLock lk(this->lock); - value = this->current_values[which]; + KScopedLightLock lk(m_lock); + value = m_current_values[which]; MESOSPHERE_ASSERT(value >= 0); - MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]); - MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); } return value; @@ -76,11 +76,11 @@ namespace ams::kern { s64 value; { - KScopedLightLock lk(this->lock); - value = this->peak_values[which]; + KScopedLightLock lk(m_lock); + value = m_peak_values[which]; MESOSPHERE_ASSERT(value >= 0); - MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]); - MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); } return value; @@ -91,11 +91,11 @@ namespace ams::kern { s64 value; { - KScopedLightLock lk(this->lock); - MESOSPHERE_ASSERT(this->current_values[which] >= 0); - MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]); - MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]); - value = this->limit_values[which] - this->current_values[which]; + KScopedLightLock lk(m_lock); + MESOSPHERE_ASSERT(m_current_values[which] >= 0); + MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); + value = m_limit_values[which] - m_current_values[which]; } return value; @@ -104,10 +104,10 @@ namespace ams::kern { Result KResourceLimit::SetLimitValue(ams::svc::LimitableResource which, s64 value) { MESOSPHERE_ASSERT_THIS(); - KScopedLightLock lk(this->lock); - R_UNLESS(this->current_values[which] <= value, svc::ResultInvalidState()); + KScopedLightLock lk(m_lock); + R_UNLESS(m_current_values[which] <= value, svc::ResultInvalidState()); - this->limit_values[which] = value; + m_limit_values[which] = value; return ResultSuccess(); } @@ -120,34 +120,34 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(value >= 0); - KScopedLightLock lk(this->lock); + KScopedLightLock lk(m_lock); - MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]); - if (this->current_hints[which] >= this->limit_values[which]) { + MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); + if 
(m_current_hints[which] >= m_limit_values[which]) { return false; } /* Loop until we reserve or run out of time. */ while (true) { - MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]); - MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); /* If we would overflow, don't allow to succeed. */ - if (this->current_values[which] + value <= this->current_values[which]) { + if (m_current_values[which] + value <= m_current_values[which]) { break; } - if (this->current_values[which] + value <= this->limit_values[which]) { - this->current_values[which] += value; - this->current_hints[which] += value; - this->peak_values[which] = std::max(this->peak_values[which], this->current_values[which]); + if (m_current_values[which] + value <= m_limit_values[which]) { + m_current_values[which] += value; + m_current_hints[which] += value; + m_peak_values[which] = std::max(m_peak_values[which], m_current_values[which]); return true; } - if (this->current_hints[which] + value <= this->limit_values[which] && (timeout < 0 || KHardwareTimer::GetTick() < timeout)) { - this->waiter_count++; - this->cond_var.Wait(&this->lock, timeout); - this->waiter_count--; + if (m_current_hints[which] + value <= m_limit_values[which] && (timeout < 0 || KHardwareTimer::GetTick() < timeout)) { + m_waiter_count++; + m_cond_var.Wait(&m_lock, timeout); + m_waiter_count--; } else { break; } @@ -165,17 +165,17 @@ namespace ams::kern { MESOSPHERE_ASSERT(value >= 0); MESOSPHERE_ASSERT(hint >= 0); - KScopedLightLock lk(this->lock); - MESOSPHERE_ASSERT(this->current_values[which] <= this->limit_values[which]); - MESOSPHERE_ASSERT(this->current_hints[which] <= this->current_values[which]); - MESOSPHERE_ASSERT(value <= this->current_values[which]); - MESOSPHERE_ASSERT(hint <= this->current_hints[which]); + KScopedLightLock lk(m_lock); + MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); + MESOSPHERE_ASSERT(value <= m_current_values[which]); + MESOSPHERE_ASSERT(hint <= m_current_hints[which]); - this->current_values[which] -= value; - this->current_hints[which] -= hint; + m_current_values[which] -= value; + m_current_hints[which] -= hint; - if (this->waiter_count != 0) { - this->cond_var.Broadcast(); + if (m_waiter_count != 0) { + m_cond_var.Broadcast(); } } diff --git a/libraries/libmesosphere/source/kern_k_scheduler.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp index 227843bc2..22951af90 100644 --- a/libraries/libmesosphere/source/kern_k_scheduler.cpp +++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp @@ -53,9 +53,9 @@ namespace ams::kern { void KScheduler::Initialize(KThread *idle_thread) { /* Set core ID and idle thread. */ - this->core_id = GetCurrentCoreId(); - this->idle_thread = idle_thread; - this->state.idle_thread_stack = this->idle_thread->GetStackTop(); + m_core_id = GetCurrentCoreId(); + m_idle_thread = idle_thread; + m_state.idle_thread_stack = m_idle_thread->GetStackTop(); /* Insert the main thread into the priority queue. */ { @@ -65,48 +65,48 @@ namespace ams::kern { } /* Bind interrupt handler. 
*/ - Kernel::GetInterruptManager().BindHandler(GetSchedulerInterruptTask(), KInterruptName_Scheduler, this->core_id, KInterruptController::PriorityLevel_Scheduler, false, false); + Kernel::GetInterruptManager().BindHandler(GetSchedulerInterruptTask(), KInterruptName_Scheduler, m_core_id, KInterruptController::PriorityLevel_Scheduler, false, false); /* Set the current thread. */ - this->current_thread = GetCurrentThreadPointer(); + m_current_thread = GetCurrentThreadPointer(); } void KScheduler::Activate() { MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1); - this->state.should_count_idle = KTargetSystem::IsDebugMode(); - this->is_active = true; + m_state.should_count_idle = KTargetSystem::IsDebugMode(); + m_is_active = true; RescheduleCurrentCore(); } void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) { - if (const u64 core_mask = cores_needing_scheduling & ~(1ul << this->core_id); core_mask != 0) { + if (const u64 core_mask = cores_needing_scheduling & ~(1ul << m_core_id); core_mask != 0) { cpu::DataSynchronizationBarrier(); Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_Scheduler, core_mask); } } u64 KScheduler::UpdateHighestPriorityThread(KThread *highest_thread) { - if (KThread *prev_highest_thread = this->state.highest_priority_thread; AMS_LIKELY(prev_highest_thread != highest_thread)) { + if (KThread *prev_highest_thread = m_state.highest_priority_thread; AMS_LIKELY(prev_highest_thread != highest_thread)) { if (AMS_LIKELY(prev_highest_thread != nullptr)) { IncrementScheduledCount(prev_highest_thread); prev_highest_thread->SetLastScheduledTick(KHardwareTimer::GetTick()); } - if (this->state.should_count_idle) { + if (m_state.should_count_idle) { if (AMS_LIKELY(highest_thread != nullptr)) { if (KProcess *process = highest_thread->GetOwnerProcess(); process != nullptr) { - process->SetRunningThread(this->core_id, highest_thread, this->state.idle_count); + process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count); } } else { - this->state.idle_count++; + m_state.idle_count++; } } - MESOSPHERE_KTRACE_SCHEDULE_UPDATE(this->core_id, (prev_highest_thread != nullptr ? prev_highest_thread : this->idle_thread), (highest_thread != nullptr ? highest_thread : this->idle_thread)); + MESOSPHERE_KTRACE_SCHEDULE_UPDATE(m_core_id, (prev_highest_thread != nullptr ? prev_highest_thread : m_idle_thread), (highest_thread != nullptr ? highest_thread : m_idle_thread)); - this->state.highest_priority_thread = highest_thread; - this->state.needs_scheduling = true; - return (1ul << this->core_id); + m_state.highest_priority_thread = highest_thread; + m_state.needs_scheduling = true; + return (1ul << m_core_id); } else { return 0; } @@ -227,7 +227,7 @@ namespace ams::kern { /* We never want to schedule a null thread, so use the idle thread if we don't have a next. */ if (next_thread == nullptr) { - next_thread = this->idle_thread; + next_thread = m_idle_thread; } /* If we're not actually switching thread, there's nothing to do. */ @@ -239,31 +239,31 @@ namespace ams::kern { MESOSPHERE_ASSERT(next_thread->GetDisableDispatchCount() == 1); /* Update the CPU time tracking variables. 
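*/

The tick accounting that follows charges the interval since the last switch to the outgoing thread (and its process), then restarts the interval. In isolation (names are stand-ins for the kernel's types):

#include <cstdint>

struct SwitchTimer {
    int64_t last_context_switch_time = 0;

    /* Returns the span to charge to the thread being switched out. */
    int64_t ChargeElapsed(int64_t cur_tick) {
        const int64_t tick_diff = cur_tick - last_context_switch_time;
        last_context_switch_time = cur_tick; /* Next interval starts now. */
        return tick_diff;
    }
};
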
- const s64 prev_tick = this->last_context_switch_time; + const s64 prev_tick = m_last_context_switch_time; const s64 cur_tick = KHardwareTimer::GetTick(); const s64 tick_diff = cur_tick - prev_tick; - cur_thread->AddCpuTime(this->core_id, tick_diff); + cur_thread->AddCpuTime(m_core_id, tick_diff); if (cur_process != nullptr) { cur_process->AddCpuTime(tick_diff); } - this->last_context_switch_time = cur_tick; + m_last_context_switch_time = cur_tick; /* Update our previous thread. */ if (cur_process != nullptr) { /* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */ - if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == this->core_id)) { - this->prev_thread = cur_thread; + if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) { + m_prev_thread = cur_thread; } else { - this->prev_thread = nullptr; + m_prev_thread = nullptr; } - } else if (cur_thread == this->idle_thread) { - this->prev_thread = nullptr; + } else if (cur_thread == m_idle_thread) { + m_prev_thread = nullptr; } MESOSPHERE_KTRACE_THREAD_SWITCH(next_thread); - if (next_thread->GetCurrentCore() != this->core_id) { - next_thread->SetCurrentCore(this->core_id); + if (next_thread->GetCurrentCore() != m_core_id) { + next_thread->SetCurrentCore(m_core_id); } /* Switch the current process, if we're switching processes. */ @@ -273,7 +273,7 @@ namespace ams::kern { /* Set the new thread. */ SetCurrentThread(next_thread); - this->current_thread = next_thread; + m_current_thread = next_thread; /* Set the new Thread Local region. */ cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress())); @@ -283,7 +283,7 @@ namespace ams::kern { MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread()); for (size_t i = 0; i < cpu::NumCores; ++i) { /* Get an atomic reference to the core scheduler's previous thread. */ - std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).prev_thread); + std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_prev_thread); static_assert(std::atomic_ref<KThread *>::is_always_lock_free); /* Atomically clear the previous thread if it's our target. */ @@ -496,7 +496,7 @@ namespace ams::kern { /* Check if the suggested thread is the thread running on its core. */ const s32 suggested_core = suggested->GetActiveCore(); - if (KThread *running_on_suggested_core = (suggested_core >= 0) ? Kernel::GetScheduler(suggested_core).state.highest_priority_thread : nullptr; running_on_suggested_core != suggested) { + if (KThread *running_on_suggested_core = (suggested_core >= 0) ? Kernel::GetScheduler(suggested_core).m_state.highest_priority_thread : nullptr; running_on_suggested_core != suggested) { /* If the current thread's priority is higher than our suggestion's we prefer the next thread to the suggestion. */ /* We also prefer the next thread when the current thread's priority is equal to the suggestions, but the next thread has been waiting longer. */ if ((suggested->GetPriority() > cur_thread.GetPriority()) || diff --git a/libraries/libmesosphere/source/kern_k_server_port.cpp b/libraries/libmesosphere/source/kern_k_server_port.cpp index da8482577..60c354a5d 100644 --- a/libraries/libmesosphere/source/kern_k_server_port.cpp +++ b/libraries/libmesosphere/source/kern_k_server_port.cpp @@ -19,7 +19,7 @@ namespace ams::kern { void KServerPort::Initialize(KPort *parent) { /* Set member variables.
*/ - this->parent = parent; + m_parent = parent; } bool KServerPort::IsLight() const { @@ -29,9 +29,9 @@ namespace ams::kern { void KServerPort::CleanupSessions() { /* Ensure our preconditions are met. */ if (this->IsLight()) { - MESOSPHERE_ASSERT(this->session_list.empty()); + MESOSPHERE_ASSERT(m_session_list.empty()); } else { - MESOSPHERE_ASSERT(this->light_session_list.empty()); + MESOSPHERE_ASSERT(m_light_session_list.empty()); } /* Cleanup the session list. */ @@ -40,9 +40,9 @@ namespace ams::kern { KServerSession *session = nullptr; { KScopedSchedulerLock sl; - while (!this->session_list.empty()) { - session = std::addressof(this->session_list.front()); - this->session_list.pop_front(); + while (!m_session_list.empty()) { + session = std::addressof(m_session_list.front()); + m_session_list.pop_front(); } } @@ -60,9 +60,9 @@ namespace ams::kern { KLightServerSession *session = nullptr; { KScopedSchedulerLock sl; - while (!this->light_session_list.empty()) { - session = std::addressof(this->light_session_list.front()); - this->light_session_list.pop_front(); + while (!m_light_session_list.empty()) { + session = std::addressof(m_light_session_list.front()); + m_light_session_list.pop_front(); } } @@ -77,21 +77,21 @@ namespace ams::kern { void KServerPort::Destroy() { /* Note with our parent that we're closed. */ - this->parent->OnClientClosed(); + m_parent->OnClientClosed(); /* Perform necessary cleanup of our session lists. */ this->CleanupSessions(); /* Close our reference to our parent. */ - this->parent->Close(); + m_parent->Close(); } bool KServerPort::IsSignaled() const { MESOSPHERE_ASSERT_THIS(); if (this->IsLight()) { - return !this->light_session_list.empty(); + return !m_light_session_list.empty(); } else { - return !this->session_list.empty(); + return !m_session_list.empty(); } } @@ -102,8 +102,8 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Add the session to our queue. */ - this->session_list.push_back(*session); - if (this->session_list.size() == 1) { + m_session_list.push_back(*session); + if (m_session_list.size() == 1) { this->NotifyAvailable(); } } @@ -115,8 +115,8 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Add the session to our queue. */ - this->light_session_list.push_back(*session); - if (this->light_session_list.size() == 1) { + m_light_session_list.push_back(*session); + if (m_light_session_list.size() == 1) { this->NotifyAvailable(); } } @@ -128,12 +128,12 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Return the first session in the list. */ - if (this->session_list.empty()) { + if (m_session_list.empty()) { return nullptr; } - KServerSession *session = std::addressof(this->session_list.front()); - this->session_list.pop_front(); + KServerSession *session = std::addressof(m_session_list.front()); + m_session_list.pop_front(); return session; } @@ -144,12 +144,12 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Return the first session in the list. 
diff --git a/libraries/libmesosphere/source/kern_k_server_session.cpp b/libraries/libmesosphere/source/kern_k_server_session.cpp
index 798209d43..a5e1d0f1f 100644
--- a/libraries/libmesosphere/source/kern_k_server_session.cpp
+++ b/libraries/libmesosphere/source/kern_k_server_session.cpp
@@ -32,10 +32,10 @@ namespace ams::kern {

         class ReceiveList {
             private:
-                u32 data[ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountMax * ipc::MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32)];
-                s32 recv_list_count;
-                uintptr_t msg_buffer_end;
-                uintptr_t msg_buffer_space_end;
+                u32 m_data[ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountMax * ipc::MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32)];
+                s32 m_recv_list_count;
+                uintptr_t m_msg_buffer_end;
+                uintptr_t m_msg_buffer_space_end;
             public:
                 static constexpr int GetEntryCount(const ipc::MessageBuffer::MessageHeader &header) {
                     const auto count = header.GetReceiveListCount();
@@ -52,9 +52,9 @@ namespace ams::kern {
                 }
             public:
                 ReceiveList(const u32 *dst_msg, uintptr_t dst_address, const KProcessPageTable &dst_page_table, const ipc::MessageBuffer::MessageHeader &dst_header, const ipc::MessageBuffer::SpecialHeader &dst_special_header, size_t msg_size, size_t out_offset, s32 dst_recv_list_idx, bool is_tls) {
-                    this->recv_list_count = dst_header.GetReceiveListCount();
-                    this->msg_buffer_end = dst_address + sizeof(u32) * out_offset;
-                    this->msg_buffer_space_end = dst_address + msg_size;
+                    m_recv_list_count = dst_header.GetReceiveListCount();
+                    m_msg_buffer_end = dst_address + sizeof(u32) * out_offset;
+                    m_msg_buffer_space_end = dst_address + msg_size;

                    /* NOTE: Nintendo calculates the receive list index here using the special header. */
                    /* We pre-calculate it in the caller, and pass it as a parameter. */
@@ -64,7 +64,7 @@ namespace ams::kern {
                     const auto entry_count = GetEntryCount(dst_header);

                     if (is_tls) {
-                        __builtin_memcpy(this->data, recv_list, entry_count * ipc::MessageBuffer::ReceiveListEntry::GetDataSize());
+                        __builtin_memcpy(m_data, recv_list, entry_count * ipc::MessageBuffer::ReceiveListEntry::GetDataSize());
                     } else {
                         uintptr_t page_addr = util::AlignDown(dst_address, PageSize);
                         uintptr_t cur_addr = dst_address + dst_recv_list_idx * sizeof(u32);
@@ -76,18 +76,18 @@ namespace ams::kern {
                                 recv_list = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(phys_addr));
                                 page_addr = util::AlignDown(cur_addr, PageSize);
                             }
-                            this->data[i] = *(recv_list++);
+                            m_data[i] = *(recv_list++);
                             cur_addr += sizeof(u32);
                         }
                     }
                 }

                 constexpr bool IsIndex() const {
-                    return this->recv_list_count > ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset;
+                    return m_recv_list_count > ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset;
                 }

                 void GetBuffer(uintptr_t &out, size_t size, int &key) const {
-                    switch (this->recv_list_count) {
+                    switch (m_recv_list_count) {
                         case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_None:
                             {
                                 out = 0;
@@ -95,11 +95,11 @@ namespace ams::kern {
                             break;
                         case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer:
                             {
-                                const uintptr_t buf = util::AlignUp(this->msg_buffer_end + key, PointerTransferBufferAlignment);
+                                const uintptr_t buf = util::AlignUp(m_msg_buffer_end + key, PointerTransferBufferAlignment);

-                                if ((buf < buf + size) && (buf + size <= this->msg_buffer_space_end)) {
+                                if ((buf < buf + size) && (buf + size <= m_msg_buffer_space_end)) {
                                     out = buf;
-                                    key = buf + size - this->msg_buffer_end;
+                                    key = buf + size - m_msg_buffer_end;
                                 } else {
                                     out = 0;
                                 }
@@ -107,7 +107,7 @@ namespace ams::kern {
                             break;
                         case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer:
                             {
-                                const ipc::MessageBuffer::ReceiveListEntry entry(this->data[0], this->data[1]);
+                                const ipc::MessageBuffer::ReceiveListEntry entry(m_data[0], m_data[1]);
                                 const uintptr_t buf = util::AlignUp(entry.GetAddress() + key, PointerTransferBufferAlignment);

                                 const uintptr_t entry_addr = entry.GetAddress();
@@ -123,8 +123,8 @@ namespace ams::kern {
                             break;
                         default:
                             {
-                                if (key < this->recv_list_count - ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset) {
-                                    const ipc::MessageBuffer::ReceiveListEntry entry(this->data[2 * key + 0], this->data[2 * key + 1]);
+                                if (key < m_recv_list_count - ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset) {
+                                    const ipc::MessageBuffer::ReceiveListEntry entry(m_data[2 * key + 0], m_data[2 * key + 1]);
                                     const uintptr_t entry_addr = entry.GetAddress();
                                     const size_t entry_size = entry.GetSize();
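In the ToMessageBuffer case above, `(buf < buf + size)` is not a tautology: it rejects uintptr_t wraparound when an attacker-controlled size would overflow the address arithmetic. A standalone sketch of the same bounds check, with an assumed 16-byte alignment standing in for PointerTransferBufferAlignment:

    #include <cstddef>
    #include <cstdint>

    constexpr std::uintptr_t AssumedAlignment = 0x10;  /* stand-in for PointerTransferBufferAlignment */

    constexpr std::uintptr_t AlignUp(std::uintptr_t value, std::uintptr_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    /* Carve size bytes out of [msg_end, space_end), advancing key past the carved region. */
    inline std::uintptr_t CarvePointerBuffer(std::uintptr_t msg_end, std::uintptr_t space_end, std::size_t size, int &key) {
        const std::uintptr_t buf = AlignUp(msg_end + key, AssumedAlignment);

        /* First comparison rejects wraparound, second rejects running past the usable space. */
        if ((buf < buf + size) && (buf + size <= space_end)) {
            key = static_cast<int>(buf + size - msg_end);
            return buf;
        }
        return 0;  /* does not fit */
    }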
@@ -953,18 +953,18 @@ namespace ams::kern {

    void KServerSession::Destroy() {
        MESOSPHERE_ASSERT_THIS();

-        this->parent->OnServerClosed();
+        m_parent->OnServerClosed();

        this->CleanupRequests();

-        this->parent->Close();
+        m_parent->Close();
    }

    Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size, KPhysicalAddress server_message_paddr) {
        MESOSPHERE_ASSERT_THIS();

        /* Lock the session. */
-        KScopedLightLock lk(this->lock);
+        KScopedLightLock lk(m_lock);

        /* Get the request and client thread. */
        KSessionRequest *request;
@@ -973,17 +973,17 @@ namespace ams::kern {
            KScopedSchedulerLock sl;

            /* Ensure that we can service the request. */
-            R_UNLESS(!this->parent->IsClientClosed(), svc::ResultSessionClosed());
+            R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed());

            /* Ensure we aren't already servicing a request. */
-            R_UNLESS(this->current_request == nullptr, svc::ResultNotFound());
+            R_UNLESS(m_current_request == nullptr, svc::ResultNotFound());

            /* Ensure we have a request to service. */
-            R_UNLESS(!this->request_list.empty(), svc::ResultNotFound());
+            R_UNLESS(!m_request_list.empty(), svc::ResultNotFound());

            /* Pop the first request from the list. */
-            request = std::addressof(this->request_list.front());
-            this->request_list.pop_front();
+            request = std::addressof(m_request_list.front());
+            m_request_list.pop_front();

            /* Get the thread for the request. */
            client_thread = KScopedAutoObject(request->GetThread());
@@ -991,7 +991,7 @@ namespace ams::kern {
        }

        /* Set the request as our current. */
-        this->current_request = request;
+        m_current_request = request;

        /* Get the client address. */
        uintptr_t client_message = request->GetAddress();
@@ -1009,9 +1009,9 @@ namespace ams::kern {
            /* Clear the current request. */
            {
                KScopedSchedulerLock sl;
-                MESOSPHERE_ASSERT(this->current_request == request);
-                this->current_request = nullptr;
-                if (!this->request_list.empty()) {
+                MESOSPHERE_ASSERT(m_current_request == request);
+                m_current_request = nullptr;
+                if (!m_request_list.empty()) {
                    this->NotifyAvailable();
                }
            }
@@ -1063,7 +1063,7 @@ namespace ams::kern {
        MESOSPHERE_ASSERT_THIS();

        /* Lock the session. */
-        KScopedLightLock lk(this->lock);
+        KScopedLightLock lk(m_lock);

        /* Get the request. */
        KSessionRequest *request;
@@ -1071,12 +1071,12 @@ namespace ams::kern {
            KScopedSchedulerLock sl;

            /* Get the current request. */
-            request = this->current_request;
+            request = m_current_request;
            R_UNLESS(request != nullptr, svc::ResultInvalidState());

            /* Clear the current request, since we're processing it. */
-            this->current_request = nullptr;
-            if (!this->request_list.empty()) {
+            m_current_request = nullptr;
+            if (!m_request_list.empty()) {
                this->NotifyAvailable();
            }
        }
@@ -1091,7 +1091,7 @@ namespace ams::kern {
        KWritableEvent *event = request->GetEvent();

        /* Check whether we're closed. */
-        const bool closed = (client_thread == nullptr || this->parent->IsClientClosed());
+        const bool closed = (client_thread == nullptr || m_parent->IsClientClosed());

        Result result;

        if (!closed) {
@@ -1160,7 +1160,7 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Ensure that we can handle new requests. */
-        R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed());
+        R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());

        /* If there's no event, this is synchronous, so we should check for thread termination. */
        if (request->GetEvent() == nullptr) {
@@ -1170,11 +1170,11 @@ namespace ams::kern {
        }

        /* Get whether we're empty. */
-        const bool was_empty = this->request_list.empty();
+        const bool was_empty = m_request_list.empty();

        /* Add the request to the list. */
        request->Open();
-        this->request_list.push_back(*request);
+        m_request_list.push_back(*request);

        /* If we were empty, signal. */
        if (was_empty) {
@@ -1189,12 +1189,12 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* If the client is closed, we're always signaled. */
-        if (this->parent->IsClientClosed()) {
+        if (m_parent->IsClientClosed()) {
            return true;
        }

        /* Otherwise, we're signaled if we have a request and aren't handling one. */
-        return !this->request_list.empty() && this->current_request == nullptr;
+        return !m_request_list.empty() && m_current_request == nullptr;
    }

    bool KServerSession::IsSignaled() const {
@@ -1207,7 +1207,7 @@
    void KServerSession::CleanupRequests() {
        MESOSPHERE_ASSERT_THIS();

-        KScopedLightLock lk(this->lock);
+        KScopedLightLock lk(m_lock);

        /* Clean up any pending requests. */
        while (true) {
@@ -1216,14 +1216,14 @@
            {
                KScopedSchedulerLock sl;

-                if (this->current_request) {
+                if (m_current_request) {
                    /* Choose the current request if we have one. */
-                    request = this->current_request;
-                    this->current_request = nullptr;
-                } else if (!this->request_list.empty()) {
+                    request = m_current_request;
+                    m_current_request = nullptr;
+                } else if (!m_request_list.empty()) {
                    /* Pop the request from the front of the list. */
-                    request = std::addressof(this->request_list.front());
-                    this->request_list.pop_front();
+                    request = std::addressof(m_request_list.front());
+                    m_request_list.pop_front();
                }
            }

@@ -1275,7 +1275,7 @@
    void KServerSession::OnClientClosed() {
        MESOSPHERE_ASSERT_THIS();

-        KScopedLightLock lk(this->lock);
+        KScopedLightLock lk(m_lock);

        /* Handle any pending requests. */
        KSessionRequest *prev_request = nullptr;
@@ -1291,9 +1291,9 @@
            {
                KScopedSchedulerLock sl;

-                if (this->current_request != nullptr && this->current_request != prev_request) {
+                if (m_current_request != nullptr && m_current_request != prev_request) {
                    /* Set the request, open a reference as we process it. */
-                    request = this->current_request;
+                    request = m_current_request;
                    request->Open();
                    cur_request = true;

@@ -1308,10 +1308,10 @@
                        terminate = true;
                    }
                    prev_request = request;
-                } else if (!this->request_list.empty()) {
+                } else if (!m_request_list.empty()) {
                    /* Pop the request from the front of the list. */
-                    request = std::addressof(this->request_list.front());
-                    this->request_list.pop_front();
+                    request = std::addressof(m_request_list.front());
+                    m_request_list.pop_front();

                    /* Get thread and event for the request. */
                    thread = request->GetThread();
@@ -1370,25 +1370,25 @@
    void KServerSession::Dump() {
        MESOSPHERE_ASSERT_THIS();

-        KScopedLightLock lk(this->lock);
+        KScopedLightLock lk(m_lock);
        {
            KScopedSchedulerLock sl;
            MESOSPHERE_RELEASE_LOG("Dump Session %p\n", this);

            /* Dump current request. */
            bool has_request = false;
-            if (this->current_request != nullptr) {
-                KThread *thread = this->current_request->GetThread();
+            if (m_current_request != nullptr) {
+                KThread *thread = m_current_request->GetThread();
                const s32 thread_id = thread != nullptr ? static_cast<s32>(thread->GetId()) : -1;
-                MESOSPHERE_RELEASE_LOG("    CurrentReq %p Thread=%p ID=%d\n", this->current_request, thread, thread_id);
+                MESOSPHERE_RELEASE_LOG("    CurrentReq %p Thread=%p ID=%d\n", m_current_request, thread, thread_id);
                has_request = true;
            }

            /* Dump all requests in list. */
-            for (auto it = this->request_list.begin(); it != this->request_list.end(); ++it) {
+            for (auto it = m_request_list.begin(); it != m_request_list.end(); ++it) {
                KThread *thread = it->GetThread();
                const s32 thread_id = thread != nullptr ? static_cast<s32>(thread->GetId()) : -1;
-                MESOSPHERE_RELEASE_LOG("    Req %p Thread=%p ID=%d\n", this->current_request, thread, thread_id);
+                MESOSPHERE_RELEASE_LOG("    Req %p Thread=%p ID=%d\n", std::addressof(*it), thread, thread_id);
                has_request = true;
            }

diff --git a/libraries/libmesosphere/source/kern_k_session.cpp b/libraries/libmesosphere/source/kern_k_session.cpp
index 2194842d2..88906469f 100644
--- a/libraries/libmesosphere/source/kern_k_session.cpp
+++ b/libraries/libmesosphere/source/kern_k_session.cpp
@@ -27,53 +27,53 @@ namespace ams::kern {
        this->Open();

        /* Create our sub sessions. */
-        KAutoObject::Create(std::addressof(this->server));
-        KAutoObject::Create(std::addressof(this->client));
+        KAutoObject::Create(std::addressof(m_server));
+        KAutoObject::Create(std::addressof(m_client));

        /* Initialize our sub sessions. */
-        this->server.Initialize(this);
-        this->client.Initialize(this);
+        m_server.Initialize(this);
+        m_client.Initialize(this);

        /* Set state and name. */
-        this->state = State::Normal;
-        this->name = name;
+        m_state = State::Normal;
+        m_name = name;

        /* Set our owner process. */
-        this->process = GetCurrentProcessPointer();
-        this->process->Open();
+        m_process = GetCurrentProcessPointer();
+        m_process->Open();

        /* Set our port. */
-        this->port = client_port;
-        if (this->port != nullptr) {
-            this->port->Open();
+        m_port = client_port;
+        if (m_port != nullptr) {
+            m_port->Open();
        }

        /* Mark initialized. */
-        this->initialized = true;
+        m_initialized = true;
    }

    void KSession::Finalize() {
-        if (this->port != nullptr) {
-            this->port->OnSessionFinalized();
-            this->port->Close();
+        if (m_port != nullptr) {
+            m_port->OnSessionFinalized();
+            m_port->Close();
        }
    }

    void KSession::OnServerClosed() {
        MESOSPHERE_ASSERT_THIS();

-        if (this->state == State::Normal) {
-            this->state = State::ServerClosed;
-            this->client.OnServerClosed();
+        if (m_state == State::Normal) {
+            m_state = State::ServerClosed;
+            m_client.OnServerClosed();
        }
    }

    void KSession::OnClientClosed() {
        MESOSPHERE_ASSERT_THIS();

-        if (this->state == State::Normal) {
-            this->state = State::ClientClosed;
-            this->server.OnClientClosed();
+        if (m_state == State::Normal) {
+            m_state = State::ClientClosed;
+            m_server.OnClientClosed();
        }
    }
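The two close handlers above form a small state machine: whichever endpoint closes first moves the session out of State::Normal and notifies the opposite endpoint, and the second close is then a no-op. A reduced sketch, with the sub-session notification elided:

    enum class State { Invalid, Normal, ClientClosed, ServerClosed };

    class Session {
        private:
            State m_state = State::Normal;
        public:
            void OnServerClosed() {
                if (m_state == State::Normal) {
                    m_state = State::ServerClosed;
                    /* m_client.OnServerClosed() would run here. */
                }
            }

            void OnClientClosed() {
                if (m_state == State::Normal) {
                    m_state = State::ClientClosed;
                    /* m_server.OnClientClosed() would run here. */
                }
            }
    };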
diff --git a/libraries/libmesosphere/source/kern_k_session_request.cpp b/libraries/libmesosphere/source/kern_k_session_request.cpp
index 8b20c52f5..ec9431c6c 100644
--- a/libraries/libmesosphere/source/kern_k_session_request.cpp
+++ b/libraries/libmesosphere/source/kern_k_session_request.cpp
@@ -24,17 +24,17 @@ namespace ams::kern {
        /* Get the mapping. */
        Mapping *mapping;
        if (index < NumStaticMappings) {
-            mapping = std::addressof(this->static_mappings[index]);
+            mapping = std::addressof(m_static_mappings[index]);
        } else {
            /* Allocate a page for the extra mappings. */
-            if (this->mappings == nullptr) {
+            if (m_mappings == nullptr) {
                KPageBuffer *page_buffer = KPageBuffer::Allocate();
                R_UNLESS(page_buffer != nullptr, svc::ResultOutOfMemory());

-                this->mappings = reinterpret_cast<Mapping *>(page_buffer);
+                m_mappings = reinterpret_cast<Mapping *>(page_buffer);
            }

-            mapping = std::addressof(this->mappings[index - NumStaticMappings]);
+            mapping = std::addressof(m_mappings[index - NumStaticMappings]);
        }

        /* Set the mapping. */
@@ -44,24 +44,24 @@ namespace ams::kern {
    }

    Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-        MESOSPHERE_ASSERT(this->num_recv == 0);
-        MESOSPHERE_ASSERT(this->num_exch == 0);
-        return this->PushMap(client, server, size, state, this->num_send++);
+        MESOSPHERE_ASSERT(m_num_recv == 0);
+        MESOSPHERE_ASSERT(m_num_exch == 0);
+        return this->PushMap(client, server, size, state, m_num_send++);
    }

    Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-        MESOSPHERE_ASSERT(this->num_exch == 0);
-        return this->PushMap(client, server, size, state, this->num_send + this->num_recv++);
+        MESOSPHERE_ASSERT(m_num_exch == 0);
+        return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
    }

    Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
-        return this->PushMap(client, server, size, state, this->num_send + this->num_recv + this->num_exch++);
+        return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
    }

    void KSessionRequest::SessionMappings::Finalize() {
-        if (this->mappings) {
-            KPageBuffer::Free(reinterpret_cast<KPageBuffer *>(this->mappings));
-            this->mappings = nullptr;
+        if (m_mappings) {
+            KPageBuffer::Free(reinterpret_cast<KPageBuffer *>(m_mappings));
+            m_mappings = nullptr;
        }
    }
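The mapping-lookup hunk above is a small-buffer optimization: the first NumStaticMappings descriptors live inline in the request, and only spilling past that allocates a single page. A sketch under assumed sizes (8 inline entries, and a 64-entry spill array standing in for one KPageBuffer page):

    #include <cstddef>
    #include <new>

    struct Mapping { /* client/server address, size, memory state */ };

    class SessionMappings {
        private:
            static constexpr std::size_t NumStaticMappings = 8;   /* assumed */
            static constexpr std::size_t NumExtraMappings  = 64;  /* assumed spill capacity */

            Mapping m_static_mappings[NumStaticMappings];
            Mapping *m_mappings = nullptr;  /* lazily allocated spill storage */
        public:
            ~SessionMappings() { delete[] m_mappings; }

            Mapping *GetMapping(std::size_t index) {
                if (index < NumStaticMappings) {
                    return &m_static_mappings[index];
                }

                /* Allocate the spill storage on first use (KPageBuffer::Allocate() in the kernel). */
                if (m_mappings == nullptr) {
                    m_mappings = new (std::nothrow) Mapping[NumExtraMappings];
                    if (m_mappings == nullptr) {
                        return nullptr;  /* svc::ResultOutOfMemory() in the kernel */
                    }
                }
                return &m_mappings[index - NumStaticMappings];
            }
    };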
diff --git a/libraries/libmesosphere/source/kern_k_shared_memory.cpp b/libraries/libmesosphere/source/kern_k_shared_memory.cpp
index e06721d3f..cc4118682 100644
--- a/libraries/libmesosphere/source/kern_k_shared_memory.cpp
+++ b/libraries/libmesosphere/source/kern_k_shared_memory.cpp
@@ -21,9 +21,9 @@ namespace ams::kern {
        MESOSPHERE_ASSERT_THIS();

        /* Set members. */
-        this->owner_process_id = owner->GetId();
-        this->owner_perm = own_perm;
-        this->remote_perm = rem_perm;
+        m_owner_process_id = owner->GetId();
+        m_owner_perm = own_perm;
+        m_remote_perm = rem_perm;

        /* Get the number of pages. */
        const size_t num_pages = util::DivideUp(size, PageSize);
@@ -37,20 +37,20 @@ namespace ams::kern {
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());

        /* Allocate the memory. */
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(this->page_group), num_pages, owner->GetAllocateOption()));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, owner->GetAllocateOption()));

        /* Commit our reservation. */
        memory_reservation.Commit();

        /* Set our resource limit. */
-        this->resource_limit = reslimit;
-        this->resource_limit->Open();
+        m_resource_limit = reslimit;
+        m_resource_limit->Open();

        /* Mark initialized. */
-        this->is_initialized = true;
+        m_is_initialized = true;

        /* Clear all pages in the memory. */
-        for (const auto &block : this->page_group) {
+        for (const auto &block : m_page_group) {
            std::memset(GetVoidPointer(block.GetAddress()), 0, block.GetSize());
        }

@@ -61,16 +61,16 @@ namespace ams::kern {
        MESOSPHERE_ASSERT_THIS();

        /* Get the number of pages. */
-        const size_t num_pages = this->page_group.GetNumPages();
+        const size_t num_pages = m_page_group.GetNumPages();
        const size_t size = num_pages * PageSize;

        /* Close and finalize the page group. */
-        this->page_group.Close();
-        this->page_group.Finalize();
+        m_page_group.Close();
+        m_page_group.Finalize();

        /* Release the memory reservation. */
-        this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, size);
-        this->resource_limit->Close();
+        m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, size);
+        m_resource_limit->Close();

        /* Perform inherited finalization. */
        KAutoObjectWithSlabHeapAndContainer::Finalize();
@@ -80,10 +80,10 @@ namespace ams::kern {
        MESOSPHERE_ASSERT_THIS();

        /* Validate the size. */
-        R_UNLESS(this->page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+        R_UNLESS(m_page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

        /* Validate the permission. */
-        const ams::svc::MemoryPermission test_perm = (process->GetId() == this->owner_process_id) ? this->owner_perm : this->remote_perm;
+        const ams::svc::MemoryPermission test_perm = (process->GetId() == m_owner_process_id) ? m_owner_perm : m_remote_perm;
        if (test_perm == ams::svc::MemoryPermission_DontCare) {
            MESOSPHERE_ASSERT(map_perm == ams::svc::MemoryPermission_Read || map_perm == ams::svc::MemoryPermission_ReadWrite);
        } else {
@@ -91,7 +91,7 @@ namespace ams::kern {
        }

        /* Map the memory. */
-        return table->MapPageGroup(address, this->page_group, KMemoryState_Shared, ConvertToKMemoryPermission(map_perm));
+        return table->MapPageGroup(address, m_page_group, KMemoryState_Shared, ConvertToKMemoryPermission(map_perm));
    }

    Result KSharedMemory::Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process) {
@@ -99,10 +99,10 @@ namespace ams::kern {
        MESOSPHERE_UNUSED(process);

        /* Validate the size. */
-        R_UNLESS(this->page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+        R_UNLESS(m_page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

        /* Unmap the memory. */
-        return table->UnmapPageGroup(address, this->page_group, KMemoryState_Shared);
+        return table->UnmapPageGroup(address, m_page_group, KMemoryState_Shared);
    }

}
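Map() above selects which stored permission applies (the owner's versus everyone else's) and treats MemoryPermission_DontCare as "Read or ReadWrite are both acceptable". A condensed sketch of just that check, with a simplified permission enum:

    enum class Perm { None, Read, ReadWrite, DontCare };

    inline bool IsMapPermissionAllowed(bool is_owner, Perm owner_perm, Perm remote_perm, Perm requested) {
        const Perm test_perm = is_owner ? owner_perm : remote_perm;

        if (test_perm == Perm::DontCare) {
            return requested == Perm::Read || requested == Perm::ReadWrite;
        }
        return requested == test_perm;
    }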
diff --git a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp
index 1dcb33c2a..9ed14016f 100644
--- a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp
+++ b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp
@@ -25,7 +25,7 @@ namespace ams::kern {
        {
            KScopedSchedulerLock sl;

-            for (auto *cur_node = this->thread_list_root; cur_node != nullptr; cur_node = cur_node->next) {
+            for (auto *cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
                KThread *thread = cur_node->thread;
                MESOSPHERE_LOG("KSynchronizationObject::Finalize(%p) with %p (id=%ld) waiting.\n", this, thread, thread->GetId());
            }
@@ -83,13 +83,13 @@ namespace ams::kern {
            thread_nodes[i].thread = thread;
            thread_nodes[i].next = nullptr;

-            if (objects[i]->thread_list_tail == nullptr) {
-                objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
+            if (objects[i]->m_thread_list_tail == nullptr) {
+                objects[i]->m_thread_list_head = std::addressof(thread_nodes[i]);
            } else {
-                objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
+                objects[i]->m_thread_list_tail->next = std::addressof(thread_nodes[i]);
            }

-            objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
+            objects[i]->m_thread_list_tail = std::addressof(thread_nodes[i]);
        }

        /* Mark the thread as waiting. */
@@ -118,7 +118,7 @@ namespace ams::kern {
        for (auto i = 0; i < num_objects; ++i) {
            /* Unlink the object from the list. */
-            ThreadListNode *prev_ptr = reinterpret_cast<ThreadListNode *>(std::addressof(objects[i]->thread_list_head));
+            ThreadListNode *prev_ptr = reinterpret_cast<ThreadListNode *>(std::addressof(objects[i]->m_thread_list_head));
            ThreadListNode *prev_val = nullptr;
            ThreadListNode *prev, *tail_prev;

@@ -129,8 +129,8 @@ namespace ams::kern {
                prev_val = prev_ptr;
            } while (prev_ptr != std::addressof(thread_nodes[i]));

-            if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
-                objects[i]->thread_list_tail = tail_prev;
+            if (objects[i]->m_thread_list_tail == std::addressof(thread_nodes[i])) {
+                objects[i]->m_thread_list_tail = tail_prev;
            }

            prev->next = thread_nodes[i].next;
@@ -157,7 +157,7 @@ namespace ams::kern {
        }

        /* Iterate over each thread. */
-        for (auto *cur_node = this->thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+        for (auto *cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
            KThread *thread = cur_node->thread;
            if (thread->GetState() == KThread::ThreadState_Waiting) {
                thread->SetSyncedObject(this, result);
@@ -176,7 +176,7 @@ namespace ams::kern {

        MESOSPHERE_RELEASE_LOG("Threads waiting on %p:\n", this);

-        for (auto *cur_node = this->thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+        for (auto *cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
            KThread *thread = cur_node->thread;

            if (KProcess *process = thread->GetOwnerProcess(); process != nullptr) {
@@ -187,7 +187,7 @@ namespace ams::kern {
        }

        /* If we didn't have any waiters, print so. */
-        if (this->thread_list_head == nullptr) {
+        if (m_thread_list_head == nullptr) {
            MESOSPHERE_RELEASE_LOG("    None\n");
        }
    }
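The unlink loop above casts the address of m_thread_list_head to a ThreadListNode pointer, so the head pointer behaves like the next field of a pseudo-node and removing the first element needs no special case. That only lines up if next is the node's first member, and the type-punning is standard kernel fare rather than strictly portable C++. A sketch under those assumptions:

    struct Node {
        Node *next;  /* must be the first member for the cast below to line up */
        int value;
    };

    /* Remove target from the singly linked list rooted at head. Assumes target is present. */
    inline void Unlink(Node *&head, Node *target) {
        Node *prev = reinterpret_cast<Node *>(&head);  /* pseudo-node whose next aliases head */
        while (prev->next != target) {
            prev = prev->next;
        }
        prev->next = target->next;
    }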
diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp
index ea1316e58..2a89ebb28 100644
--- a/libraries/libmesosphere/source/kern_k_thread.cpp
+++ b/libraries/libmesosphere/source/kern_k_thread.cpp
@@ -60,7 +60,7 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(0 <= phys_core && phys_core < static_cast<s32>(cpu::NumCores));

        /* First, clear the TLS address. */
-        this->tls_address = Null<KProcessAddress>;
+        m_tls_address = Null<KProcessAddress>;

        const uintptr_t kern_stack_top_address = reinterpret_cast<uintptr_t>(kern_stack_top);
@@ -94,73 +94,73 @@ namespace ams::kern {
        }

        /* Set the ideal core ID and affinity mask. */
-        this->virtual_ideal_core_id = virt_core;
-        this->physical_ideal_core_id = phys_core;
-        this->virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
-        this->physical_affinity_mask.SetAffinity(phys_core, true);
+        m_virtual_ideal_core_id = virt_core;
+        m_physical_ideal_core_id = phys_core;
+        m_virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
+        m_physical_affinity_mask.SetAffinity(phys_core, true);

        /* Set the thread state. */
-        this->thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized;
+        m_thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized;

        /* Set TLS address and TLS heap address. */
        /* NOTE: Nintendo wrote TLS address above already, but official code really does write tls address twice. */
-        this->tls_address = 0;
-        this->tls_heap_address = 0;
+        m_tls_address = 0;
+        m_tls_heap_address = 0;

        /* Set parent and condvar tree. */
-        this->parent = nullptr;
-        this->condvar_tree = nullptr;
+        m_parent = nullptr;
+        m_condvar_tree = nullptr;

        /* Set sync booleans. */
-        this->signaled = false;
-        this->termination_requested = false;
-        this->wait_cancelled = false;
-        this->cancellable = false;
+        m_signaled = false;
+        m_termination_requested = false;
+        m_wait_cancelled = false;
+        m_cancellable = false;

        /* Set core ID and wait result. */
-        this->core_id = phys_core;
-        this->wait_result = svc::ResultNoSynchronizationObject();
+        m_core_id = phys_core;
+        m_wait_result = svc::ResultNoSynchronizationObject();

        /* Set the stack top. */
-        this->kernel_stack_top = kern_stack_top;
+        m_kernel_stack_top = kern_stack_top;

        /* Set priorities. */
-        this->priority = prio;
-        this->base_priority = prio;
+        m_priority = prio;
+        m_base_priority = prio;

        /* Set sync object and waiting lock to null. */
-        this->synced_object = nullptr;
-        this->waiting_lock = nullptr;
+        m_synced_object = nullptr;
+        m_waiting_lock = nullptr;

        /* Initialize sleeping queue. */
-        this->sleeping_queue = nullptr;
+        m_sleeping_queue = nullptr;

        /* Set suspend flags. */
-        this->suspend_request_flags = 0;
-        this->suspend_allowed_flags = ThreadState_SuspendFlagMask;
+        m_suspend_request_flags = 0;
+        m_suspend_allowed_flags = ThreadState_SuspendFlagMask;

        /* We're neither debug attached, nor are we nesting our priority inheritance. */
-        this->debug_attached = false;
-        this->priority_inheritance_count = 0;
+        m_debug_attached = false;
+        m_priority_inheritance_count = 0;

        /* We haven't been scheduled, and we have done no light IPC. */
-        this->schedule_count = -1;
-        this->last_scheduled_tick = 0;
-        this->light_ipc_data = nullptr;
+        m_schedule_count = -1;
+        m_last_scheduled_tick = 0;
+        m_light_ipc_data = nullptr;

        /* We're not waiting for a lock, and we haven't disabled migration. */
-        this->lock_owner = nullptr;
-        this->num_core_migration_disables = 0;
+        m_lock_owner = nullptr;
+        m_num_core_migration_disables = 0;

        /* We have no waiters, but we do have an entrypoint. */
-        this->num_kernel_waiters = 0;
+        m_num_kernel_waiters = 0;

        /* Set our current core id. */
-        this->current_core_id = phys_core;
+        m_current_core_id = phys_core;

        /* We haven't released our resource limit hint, and we've spent no time on the cpu. */
-        this->resource_limit_release_hint = 0;
-        this->cpu_time = 0;
+        m_resource_limit_release_hint = 0;
+        m_cpu_time = 0;

        /* Setup our kernel stack. */
        if (type != ThreadType_Main) {
@@ -172,45 +172,45 @@ namespace ams::kern {

        /* Setup the TLS, if needed. */
        if (type == ThreadType_User) {
-            R_TRY(owner->CreateThreadLocalRegion(std::addressof(this->tls_address)));
-            this->tls_heap_address = owner->GetThreadLocalRegionPointer(this->tls_address);
-            std::memset(this->tls_heap_address, 0, ams::svc::ThreadLocalRegionSize);
+            R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
+            m_tls_heap_address = owner->GetThreadLocalRegionPointer(m_tls_address);
+            std::memset(m_tls_heap_address, 0, ams::svc::ThreadLocalRegionSize);
        }

        /* Set parent, if relevant. */
        if (owner != nullptr) {
-            this->parent = owner;
-            this->parent->Open();
-            this->parent->IncrementThreadCount();
+            m_parent = owner;
+            m_parent->Open();
+            m_parent->IncrementThreadCount();
        }

        /* Initialize thread context. */
        constexpr bool IsDefault64Bit = sizeof(uintptr_t) == sizeof(u64);
-        const bool is_64_bit = this->parent ? this->parent->Is64Bit() : IsDefault64Bit;
+        const bool is_64_bit = m_parent ? m_parent->Is64Bit() : IsDefault64Bit;
        const bool is_user = (type == ThreadType_User);
        const bool is_main = (type == ThreadType_Main);
-        this->thread_context.Initialize(reinterpret_cast<uintptr_t>(func), reinterpret_cast<uintptr_t>(this->GetStackTop()), GetInteger(user_stack_top), arg, is_user, is_64_bit, is_main);
+        m_thread_context.Initialize(reinterpret_cast<uintptr_t>(func), reinterpret_cast<uintptr_t>(this->GetStackTop()), GetInteger(user_stack_top), arg, is_user, is_64_bit, is_main);

        /* Setup the stack parameters. */
        StackParameters &sp = this->GetStackParameters();
-        if (this->parent != nullptr) {
-            this->parent->CopySvcPermissionsTo(sp);
+        if (m_parent != nullptr) {
+            m_parent->CopySvcPermissionsTo(sp);
        }
-        sp.context = std::addressof(this->thread_context);
+        sp.context = std::addressof(m_thread_context);
        sp.cur_thread = this;
        sp.disable_count = 1;
        this->SetInExceptionHandler();

        /* Set thread ID. */
-        this->thread_id = s_next_thread_id++;
+        m_thread_id = s_next_thread_id++;

        /* We initialized! */
-        this->initialized = true;
+        m_initialized = true;

        /* Register ourselves with our parent process. */
-        if (this->parent != nullptr) {
-            this->parent->RegisterThread(this);
-            if (this->parent->IsSuspended()) {
+        if (m_parent != nullptr) {
+            m_parent->RegisterThread(this);
+            if (m_parent->IsSuspended()) {
                this->RequestSuspend(SuspendType_Process);
            }
        }
@@ -276,42 +276,42 @@ namespace ams::kern {
        MESOSPHERE_ASSERT_THIS();

        /* If the thread has an owner process, unregister it. */
-        if (this->parent != nullptr) {
-            this->parent->UnregisterThread(this);
+        if (m_parent != nullptr) {
+            m_parent->UnregisterThread(this);
        }

        /* If the thread has a local region, delete it. */
-        if (this->tls_address != Null<KProcessAddress>) {
-            MESOSPHERE_R_ABORT_UNLESS(this->parent->DeleteThreadLocalRegion(this->tls_address));
+        if (m_tls_address != Null<KProcessAddress>) {
+            MESOSPHERE_R_ABORT_UNLESS(m_parent->DeleteThreadLocalRegion(m_tls_address));
        }

        /* Release any waiters. */
        {
-            MESOSPHERE_ASSERT(this->lock_owner == nullptr);
+            MESOSPHERE_ASSERT(m_lock_owner == nullptr);
            KScopedSchedulerLock sl;

-            auto it = this->waiter_list.begin();
-            while (it != this->waiter_list.end()) {
+            auto it = m_waiter_list.begin();
+            while (it != m_waiter_list.end()) {
                /* The thread shouldn't be a kernel waiter. */
                MESOSPHERE_ASSERT(!IsKernelAddressKey(it->GetAddressKey()));
                it->SetLockOwner(nullptr);
                it->SetSyncedObject(nullptr, svc::ResultInvalidState());
                it->Wakeup();
-                it = this->waiter_list.erase(it);
+                it = m_waiter_list.erase(it);
            }
        }

        /* Finalize the thread context. */
-        this->thread_context.Finalize();
+        m_thread_context.Finalize();

        /* Cleanup the kernel stack. */
-        if (this->kernel_stack_top != nullptr) {
-            CleanupKernelStack(reinterpret_cast<uintptr_t>(this->kernel_stack_top));
+        if (m_kernel_stack_top != nullptr) {
+            CleanupKernelStack(reinterpret_cast<uintptr_t>(m_kernel_stack_top));
        }

        /* Decrement the parent process's thread count. */
-        if (this->parent != nullptr) {
-            this->parent->DecrementThreadCount();
+        if (m_parent != nullptr) {
+            m_parent->DecrementThreadCount();
        }

        /* Perform inherited finalization. */
@@ -319,7 +319,7 @@ namespace ams::kern {
    }

    bool KThread::IsSignaled() const {
-        return this->signaled;
+        return m_signaled;
    }

    void KThread::Wakeup() {
@@ -327,8 +327,8 @@ namespace ams::kern {
        KScopedSchedulerLock sl;

        if (this->GetState() == ThreadState_Waiting) {
-            if (this->sleeping_queue != nullptr) {
-                this->sleeping_queue->WakeupThread(this);
+            if (m_sleeping_queue != nullptr) {
+                m_sleeping_queue->WakeupThread(this);
            } else {
                this->SetState(ThreadState_Runnable);
            }
@@ -347,10 +347,10 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Release user exception and unpin, if relevant. */
-        if (this->parent != nullptr) {
-            this->parent->ReleaseUserException(this);
-            if (this->parent->GetPinnedThread(GetCurrentCoreId()) == this) {
-                this->parent->UnpinCurrentThread();
+        if (m_parent != nullptr) {
+            m_parent->ReleaseUserException(this);
+            if (m_parent->GetPinnedThread(GetCurrentCoreId()) == this) {
+                m_parent->UnpinCurrentThread();
            }
        }

@@ -358,12 +358,12 @@ namespace ams::kern {
        this->SetState(KThread::ThreadState_Terminated);

        /* Clear the thread's status as running in parent. */
-        if (this->parent != nullptr) {
-            this->parent->ClearRunningThread(this);
+        if (m_parent != nullptr) {
+            m_parent->ClearRunningThread(this);
        }

        /* Signal. */
-        this->signaled = true;
+        m_signaled = true;
        this->NotifyAvailable();

        /* Call the on thread termination handler. */
@@ -380,7 +380,7 @@ namespace ams::kern {
        MESOSPHERE_ASSERT_THIS();

        /* Ensure that the thread is not executing on any core. */
-        if (this->parent != nullptr) {
+        if (m_parent != nullptr) {
            for (size_t i = 0; i < cpu::NumCores; ++i) {
                KThread *core_thread;
                do {
@@ -406,43 +406,43 @@ namespace ams::kern {
        this->GetStackParameters().is_pinned = true;

        /* Disable core migration. */
-        MESOSPHERE_ASSERT(this->num_core_migration_disables == 0);
+        MESOSPHERE_ASSERT(m_num_core_migration_disables == 0);
        {
-            ++this->num_core_migration_disables;
+            ++m_num_core_migration_disables;

            /* Save our ideal state to restore when we're unpinned. */
-            this->original_physical_ideal_core_id = this->physical_ideal_core_id;
-            this->original_physical_affinity_mask = this->physical_affinity_mask;
+            m_original_physical_ideal_core_id = m_physical_ideal_core_id;
+            m_original_physical_affinity_mask = m_physical_affinity_mask;

            /* Bind ourselves to this core. */
            const s32 active_core = this->GetActiveCore();
            const s32 current_core = GetCurrentCoreId();

            this->SetActiveCore(current_core);
-            this->physical_ideal_core_id = current_core;
-            this->physical_affinity_mask.SetAffinityMask(1ul << current_core);
+            m_physical_ideal_core_id = current_core;
+            m_physical_affinity_mask.SetAffinityMask(1ul << current_core);

-            if (active_core != current_core || this->physical_affinity_mask.GetAffinityMask() != this->original_physical_affinity_mask.GetAffinityMask()) {
-                KScheduler::OnThreadAffinityMaskChanged(this, this->original_physical_affinity_mask, active_core);
+            if (active_core != current_core || m_physical_affinity_mask.GetAffinityMask() != m_original_physical_affinity_mask.GetAffinityMask()) {
+                KScheduler::OnThreadAffinityMaskChanged(this, m_original_physical_affinity_mask, active_core);
            }
        }

        /* Disallow performing thread suspension. */
        {
            /* Update our allow flags. */
-            this->suspend_allowed_flags &= ~(1 << (SuspendType_Thread + ThreadState_SuspendShift));
+            m_suspend_allowed_flags &= ~(1 << (SuspendType_Thread + ThreadState_SuspendShift));

            /* Update our state. */
-            const ThreadState old_state = this->thread_state;
-            this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
-            if (this->thread_state != old_state) {
+            const ThreadState old_state = m_thread_state;
+            m_thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+            if (m_thread_state != old_state) {
                KScheduler::OnThreadStateChanged(this, old_state);
            }
        }

        /* Update our SVC access permissions. */
-        MESOSPHERE_ASSERT(this->parent != nullptr);
-        this->parent->CopyPinnedSvcPermissionsTo(this->GetStackParameters());
+        MESOSPHERE_ASSERT(m_parent != nullptr);
+        m_parent->CopyPinnedSvcPermissionsTo(this->GetStackParameters());
    }
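The state update above packs per-type suspend request bits above the low state bits, so "apply pending suspends" is a single mask-and-or. A sketch with illustrative constants (the kernel's actual values live in its KThread headers):

    #include <cstdint>

    enum ThreadState : std::uint32_t {
        ThreadState_Initialized  = 0,
        ThreadState_Waiting      = 1,
        ThreadState_Runnable     = 2,
        ThreadState_Terminated   = 3,

        ThreadState_Mask         = 0xF,  /* low bits: the state proper */
        ThreadState_SuspendShift = 4,    /* suspend request bits start here */
    };

    constexpr std::uint32_t ApplySuspendFlags(std::uint32_t state, std::uint32_t suspend_flags) {
        /* Keep the low state bits, overlay whichever suspend bits are pending. */
        return suspend_flags | (state & ThreadState_Mask);
    }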
    void KThread::Unpin() {
@@ -453,24 +453,24 @@ namespace ams::kern {
        this->GetStackParameters().is_pinned = false;

        /* Enable core migration. */
-        MESOSPHERE_ASSERT(this->num_core_migration_disables == 1);
+        MESOSPHERE_ASSERT(m_num_core_migration_disables == 1);
        {
-            --this->num_core_migration_disables;
+            --m_num_core_migration_disables;

            /* Restore our original state. */
-            const KAffinityMask old_mask = this->physical_affinity_mask;
+            const KAffinityMask old_mask = m_physical_affinity_mask;

-            this->physical_ideal_core_id = this->original_physical_ideal_core_id;
-            this->physical_affinity_mask = this->original_physical_affinity_mask;
+            m_physical_ideal_core_id = m_original_physical_ideal_core_id;
+            m_physical_affinity_mask = m_original_physical_affinity_mask;

-            if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+            if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                const s32 active_core = this->GetActiveCore();

-                if (!this->physical_affinity_mask.GetAffinity(active_core)) {
-                    if (this->physical_ideal_core_id >= 0) {
-                        this->SetActiveCore(this->physical_ideal_core_id);
+                if (!m_physical_affinity_mask.GetAffinity(active_core)) {
+                    if (m_physical_ideal_core_id >= 0) {
+                        this->SetActiveCore(m_physical_ideal_core_id);
                    } else {
-                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask()));
+                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask()));
                    }
                }
                KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
@@ -481,23 +481,23 @@
        {
            /* Update our allow flags. */
            if (!this->IsTerminationRequested()) {
-                this->suspend_allowed_flags |= (1 << (SuspendType_Thread + ThreadState_SuspendShift));
+                m_suspend_allowed_flags |= (1 << (SuspendType_Thread + ThreadState_SuspendShift));
            }

            /* Update our state. */
-            const ThreadState old_state = this->thread_state;
-            this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
-            if (this->thread_state != old_state) {
+            const ThreadState old_state = m_thread_state;
+            m_thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+            if (m_thread_state != old_state) {
                KScheduler::OnThreadStateChanged(this, old_state);
            }
        }

        /* Update our SVC access permissions. */
-        MESOSPHERE_ASSERT(this->parent != nullptr);
-        this->parent->CopyUnpinnedSvcPermissionsTo(this->GetStackParameters());
+        MESOSPHERE_ASSERT(m_parent != nullptr);
+        m_parent->CopyUnpinnedSvcPermissionsTo(this->GetStackParameters());

        /* Resume any threads that began waiting on us while we were pinned. */
-        for (auto it = this->pinned_waiter_list.begin(); it != this->pinned_waiter_list.end(); ++it) {
+        for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) {
            if (it->GetState() == ThreadState_Waiting) {
                it->SetState(ThreadState_Runnable);
            }
@@ -509,19 +509,19 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
        KScopedSchedulerLock sl;

-        MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
-        if ((this->num_core_migration_disables++) == 0) {
+        MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);
+        if ((m_num_core_migration_disables++) == 0) {
            /* Save our ideal state to restore when we can migrate again. */
-            this->original_physical_ideal_core_id = this->physical_ideal_core_id;
-            this->original_physical_affinity_mask = this->physical_affinity_mask;
+            m_original_physical_ideal_core_id = m_physical_ideal_core_id;
+            m_original_physical_affinity_mask = m_physical_affinity_mask;

            /* Bind ourselves to this core. */
            const s32 active_core = this->GetActiveCore();

-            this->physical_ideal_core_id = active_core;
-            this->physical_affinity_mask.SetAffinityMask(1ul << active_core);
+            m_physical_ideal_core_id = active_core;
+            m_physical_affinity_mask.SetAffinityMask(1ul << active_core);

-            if (this->physical_affinity_mask.GetAffinityMask() != this->original_physical_affinity_mask.GetAffinityMask()) {
-                KScheduler::OnThreadAffinityMaskChanged(this, this->original_physical_affinity_mask, active_core);
+            if (m_physical_affinity_mask.GetAffinityMask() != m_original_physical_affinity_mask.GetAffinityMask()) {
+                KScheduler::OnThreadAffinityMaskChanged(this, m_original_physical_affinity_mask, active_core);
            }
        }
    }
@@ -531,22 +531,22 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
        KScopedSchedulerLock sl;

-        MESOSPHERE_ASSERT(this->num_core_migration_disables > 0);
-        if ((--this->num_core_migration_disables) == 0) {
-            const KAffinityMask old_mask = this->physical_affinity_mask;
+        MESOSPHERE_ASSERT(m_num_core_migration_disables > 0);
+        if ((--m_num_core_migration_disables) == 0) {
+            const KAffinityMask old_mask = m_physical_affinity_mask;

            /* Restore our ideals. */
-            this->physical_ideal_core_id = this->original_physical_ideal_core_id;
-            this->physical_affinity_mask = this->original_physical_affinity_mask;
+            m_physical_ideal_core_id = m_original_physical_ideal_core_id;
+            m_physical_affinity_mask = m_original_physical_affinity_mask;

-            if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+            if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                const s32 active_core = this->GetActiveCore();

-                if (!this->physical_affinity_mask.GetAffinity(active_core)) {
-                    if (this->physical_ideal_core_id >= 0) {
-                        this->SetActiveCore(this->physical_ideal_core_id);
+                if (!m_physical_affinity_mask.GetAffinity(active_core)) {
+                    if (m_physical_ideal_core_id >= 0) {
+                        this->SetActiveCore(m_physical_ideal_core_id);
                    } else {
-                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask()));
+                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask()));
                    }
                }
                KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
@@ -560,8 +560,8 @@ namespace ams::kern {
        KScopedSchedulerLock sl;

        /* Get the virtual mask. */
-        *out_ideal_core = this->virtual_ideal_core_id;
-        *out_affinity_mask = this->virtual_affinity_mask;
+        *out_ideal_core = m_virtual_ideal_core_id;
+        *out_affinity_mask = m_virtual_affinity_mask;
        }

        return ResultSuccess();
@@ -571,15 +571,15 @@ namespace ams::kern {
        MESOSPHERE_ASSERT_THIS();
        {
            KScopedSchedulerLock sl;
-            MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
+            MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);

            /* Select between core mask and original core mask. */
-            if (this->num_core_migration_disables == 0) {
-                *out_ideal_core = this->physical_ideal_core_id;
-                *out_affinity_mask = this->physical_affinity_mask.GetAffinityMask();
+            if (m_num_core_migration_disables == 0) {
+                *out_ideal_core = m_physical_ideal_core_id;
+                *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask();
            } else {
-                *out_ideal_core = this->original_physical_ideal_core_id;
-                *out_affinity_mask = this->original_physical_affinity_mask.GetAffinityMask();
+                *out_ideal_core = m_original_physical_ideal_core_id;
+                *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask();
            }
        }

@@ -588,25 +588,25 @@ namespace ams::kern {

    Result KThread::SetCoreMask(int32_t core_id, u64 v_affinity_mask) {
        MESOSPHERE_ASSERT_THIS();
-        MESOSPHERE_ASSERT(this->parent != nullptr);
+        MESOSPHERE_ASSERT(m_parent != nullptr);
        MESOSPHERE_ASSERT(v_affinity_mask != 0);

-        KScopedLightLock lk(this->activity_pause_lock);
+        KScopedLightLock lk(m_activity_pause_lock);

        /* Set the core mask. */
        u64 p_affinity_mask = 0;
        {
            KScopedSchedulerLock sl;
-            MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
+            MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);

            /* If the core id is no-update magic, preserve the ideal core id. */
            if (core_id == ams::svc::IdealCoreNoUpdate) {
-                core_id = this->virtual_ideal_core_id;
+                core_id = m_virtual_ideal_core_id;
                R_UNLESS(((1ul << core_id) & v_affinity_mask) != 0, svc::ResultInvalidCombination());
            }

            /* Set the virtual core/affinity mask. */
-            this->virtual_ideal_core_id = core_id;
-            this->virtual_affinity_mask = v_affinity_mask;
+            m_virtual_ideal_core_id = core_id;
+            m_virtual_affinity_mask = v_affinity_mask;

            /* Translate the virtual core to a physical core. */
            if (core_id >= 0) {
@@ -621,26 +621,26 @@ namespace ams::kern {
            }

            /* If we haven't disabled migration, perform an affinity change. */
-            if (this->num_core_migration_disables == 0) {
-                const KAffinityMask old_mask = this->physical_affinity_mask;
+            if (m_num_core_migration_disables == 0) {
+                const KAffinityMask old_mask = m_physical_affinity_mask;

                /* Set our new ideals. */
-                this->physical_ideal_core_id = core_id;
-                this->physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+                m_physical_ideal_core_id = core_id;
+                m_physical_affinity_mask.SetAffinityMask(p_affinity_mask);

-                if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+                if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                    const s32 active_core = this->GetActiveCore();

-                    if (active_core >= 0 && !this->physical_affinity_mask.GetAffinity(active_core)) {
-                        const s32 new_core = this->physical_ideal_core_id >= 0 ? this->physical_ideal_core_id : BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask());
+                    if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) {
+                        const s32 new_core = m_physical_ideal_core_id >= 0 ? m_physical_ideal_core_id : BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask());
                        this->SetActiveCore(new_core);
                    }
                    KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
                }
            } else {
                /* Otherwise, we edit the original affinity for restoration later. */
-                this->original_physical_ideal_core_id = core_id;
-                this->original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+                m_original_physical_ideal_core_id = core_id;
+                m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
            }
        }

@@ -679,7 +679,7 @@ namespace ams::kern {
                    thread_is_pinned = true;

                    /* Wait until the thread isn't pinned any more. */
-                    this->pinned_waiter_list.push_back(GetCurrentThread());
+                    m_pinned_waiter_list.push_back(GetCurrentThread());
                    GetCurrentThread().SetState(ThreadState_Waiting);
                } else {
                    /* If the thread isn't pinned, release the scheduler lock and retry until it's not current. */
@@ -694,7 +694,7 @@ namespace ams::kern {
                KScopedSchedulerLock sl;

                /* Remove from the list. */
-                this->pinned_waiter_list.erase(this->pinned_waiter_list.iterator_to(GetCurrentThread()));
+                m_pinned_waiter_list.erase(m_pinned_waiter_list.iterator_to(GetCurrentThread()));
            }
        }

@@ -708,7 +708,7 @@ namespace ams::kern {
        KScopedSchedulerLock sl;

        /* Change our base priority. */
-        this->base_priority = priority;
+        m_base_priority = priority;

        /* Perform a priority restoration. */
        RestorePriority(this);
@@ -720,9 +720,9 @@ namespace ams::kern {
        KScopedSchedulerLock sl;

        /* Change both our priorities to the idle thread priority. */
-        const s32 old_priority = this->priority;
-        this->priority = IdleThreadPriority;
-        this->base_priority = IdleThreadPriority;
+        const s32 old_priority = m_priority;
+        m_priority = IdleThreadPriority;
+        m_base_priority = IdleThreadPriority;
        KScheduler::OnThreadPriorityChanged(this, old_priority);

        return ResultSuccess();
@@ -734,7 +734,7 @@ namespace ams::kern {
        KScopedSchedulerLock lk;

        /* Note the request in our flags. */
-        this->suspend_request_flags |= (1u << (ThreadState_SuspendShift + type));
+        m_suspend_request_flags |= (1u << (ThreadState_SuspendShift + type));

        /* Try to perform the suspend. */
        this->TrySuspend();
@@ -746,12 +746,12 @@ namespace ams::kern {
        KScopedSchedulerLock sl;

        /* Clear the request in our flags. */
-        this->suspend_request_flags &= ~(1u << (ThreadState_SuspendShift + type));
+        m_suspend_request_flags &= ~(1u << (ThreadState_SuspendShift + type));

        /* Update our state. */
-        const ThreadState old_state = this->thread_state;
-        this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
-        if (this->thread_state != old_state) {
+        const ThreadState old_state = m_thread_state;
+        m_thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+        if (m_thread_state != old_state) {
            KScheduler::OnThreadStateChanged(this, old_state);
        }
    }
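When the affinity hunks above must pick a new active core and no ideal core is set, they take the highest set bit of the mask via BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(mask). With C++20 the same selection can be written portably; a sketch (63 assumes the usual 64-bit unsigned long long):

    #include <bit>

    /* Highest-numbered core allowed by mask; mask must be non-zero. */
    constexpr int HighestAllowedCore(unsigned long long mask) {
        return 63 - std::countl_zero(mask);
    }

    static_assert(HighestAllowedCore(0b0110) == 2);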
@@ -762,18 +762,18 @@ namespace ams::kern {
        KScopedSchedulerLock sl;

        /* Check if we're waiting and cancellable. */
-        if (this->GetState() == ThreadState_Waiting && this->cancellable) {
-            if (this->sleeping_queue != nullptr) {
-                this->sleeping_queue->WakeupThread(this);
-                this->wait_cancelled = true;
+        if (this->GetState() == ThreadState_Waiting && m_cancellable) {
+            if (m_sleeping_queue != nullptr) {
+                m_sleeping_queue->WakeupThread(this);
+                m_wait_cancelled = true;
            } else {
                this->SetSyncedObject(nullptr, svc::ResultCancelled());
                this->SetState(ThreadState_Runnable);
-                this->wait_cancelled = false;
+                m_wait_cancelled = false;
            }
        } else {
            /* Otherwise, note that we cancelled a wait. */
-            this->wait_cancelled = true;
+            m_wait_cancelled = true;
        }
    }

@@ -798,8 +798,8 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(this->IsSuspendRequested());

        /* Set our suspend flags in state. */
-        const auto old_state = this->thread_state;
-        this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
+        const auto old_state = m_thread_state;
+        m_thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));

        /* Note the state change in scheduler. */
        KScheduler::OnThreadStateChanged(this, old_state);
@@ -810,8 +810,8 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Clear our suspend flags in state. */
-        const auto old_state = this->thread_state;
-        this->thread_state = static_cast<ThreadState>(old_state & ThreadState_Mask);
+        const auto old_state = m_thread_state;
+        m_thread_state = static_cast<ThreadState>(old_state & ThreadState_Mask);

        /* Note the state change in scheduler. */
        KScheduler::OnThreadStateChanged(this, old_state);
@@ -819,10 +819,10 @@ namespace ams::kern {

    size_t KThread::GetKernelStackUsage() const {
        MESOSPHERE_ASSERT_THIS();
-        MESOSPHERE_ASSERT(this->kernel_stack_top != nullptr);
+        MESOSPHERE_ASSERT(m_kernel_stack_top != nullptr);

#if defined(MESOSPHERE_ENABLE_KERNEL_STACK_USAGE)
-        const u8 *stack = static_cast<const u8 *>(this->kernel_stack_top) - PageSize;
+        const u8 *stack = static_cast<const u8 *>(m_kernel_stack_top) - PageSize;

        size_t i;
        for (i = 0; i < PageSize; ++i) {
@@ -839,7 +839,7 @@ namespace ams::kern {

    Result KThread::SetActivity(ams::svc::ThreadActivity activity) {
        /* Lock ourselves. */
-        KScopedLightLock lk(this->activity_pause_lock);
+        KScopedLightLock lk(m_activity_pause_lock);

        /* Set the activity. */
        {
@@ -889,7 +889,7 @@ namespace ams::kern {
                    thread_is_current = false;

                    /* Wait until the thread isn't pinned any more. */
-                    this->pinned_waiter_list.push_back(GetCurrentThread());
+                    m_pinned_waiter_list.push_back(GetCurrentThread());
                    GetCurrentThread().SetState(ThreadState_Waiting);
                } else {
                    /* Check if the thread is currently running. */
@@ -911,7 +911,7 @@ namespace ams::kern {
                KScopedSchedulerLock sl;

                /* Remove from the list. */
-                this->pinned_waiter_list.erase(this->pinned_waiter_list.iterator_to(GetCurrentThread()));
+                m_pinned_waiter_list.erase(m_pinned_waiter_list.iterator_to(GetCurrentThread()));
            }
        }

@@ -920,7 +920,7 @@ namespace ams::kern {

    Result KThread::GetThreadContext3(ams::svc::ThreadContext *out) {
        /* Lock ourselves. */
-        KScopedLightLock lk(this->activity_pause_lock);
+        KScopedLightLock lk(m_activity_pause_lock);

        /* Get the context. */
        {
@@ -944,8 +944,8 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Find the right spot to insert the waiter. */
-        auto it = this->waiter_list.begin();
-        while (it != this->waiter_list.end()) {
+        auto it = m_waiter_list.begin();
+        while (it != m_waiter_list.end()) {
            if (it->GetPriority() > thread->GetPriority()) {
                break;
            }
@@ -954,11 +954,11 @@ namespace ams::kern {

        /* Keep track of how many kernel waiters we have. */
        if (IsKernelAddressKey(thread->GetAddressKey())) {
-            MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters++) >= 0);
+            MESOSPHERE_ABORT_UNLESS((m_num_kernel_waiters++) >= 0);
        }

        /* Insert the waiter. */
-        this->waiter_list.insert(it, *thread);
+        m_waiter_list.insert(it, *thread);
        thread->SetLockOwner(this);
    }

@@ -968,11 +968,11 @@ namespace ams::kern {

        /* Keep track of how many kernel waiters we have. */
        if (IsKernelAddressKey(thread->GetAddressKey())) {
-            MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters--) > 0);
+            MESOSPHERE_ABORT_UNLESS((m_num_kernel_waiters--) > 0);
        }

        /* Remove the waiter. */
-        this->waiter_list.erase(this->waiter_list.iterator_to(*thread));
+        m_waiter_list.erase(m_waiter_list.iterator_to(*thread));
        thread->SetLockOwner(nullptr);
    }

@@ -983,7 +983,7 @@ namespace ams::kern {
        /* We want to inherit priority where possible. */
        s32 new_priority = thread->GetBasePriority();
        if (thread->HasWaiters()) {
-            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+            new_priority = std::min(new_priority, thread->m_waiter_list.front().GetPriority());
        }

        /* If the priority we would inherit is not different from ours, don't do anything. */
@@ -1039,16 +1039,16 @@ namespace ams::kern {
        s32 num_waiters = 0;
        KThread *next_lock_owner = nullptr;

-        auto it = this->waiter_list.begin();
-        while (it != this->waiter_list.end()) {
+        auto it = m_waiter_list.begin();
+        while (it != m_waiter_list.end()) {
            if (it->GetAddressKey() == key) {
                KThread *thread = std::addressof(*it);

                /* Keep track of how many kernel waiters we have. */
                if (IsKernelAddressKey(thread->GetAddressKey())) {
-                    MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters--) > 0);
+                    MESOSPHERE_ABORT_UNLESS((m_num_kernel_waiters--) > 0);
                }
-                it = this->waiter_list.erase(it);
+                it = m_waiter_list.erase(it);

                /* Update the next lock owner. */
                if (next_lock_owner == nullptr) {
@@ -1117,9 +1117,9 @@ namespace ams::kern {
        KDebug::OnExitThread(this);

        /* Release the thread resource hint from parent. */
-        if (this->parent != nullptr) {
-            this->parent->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 0, 1);
-            this->resource_limit_release_hint = true;
+        if (m_parent != nullptr) {
+            m_parent->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 0, 1);
+            m_resource_limit_release_hint = true;
        }

        /* Perform termination. */
        {
            KScopedSchedulerLock sl;

            /* Disallow all suspension. */
-            this->suspend_allowed_flags = 0;
+            m_suspend_allowed_flags = 0;

            /* Start termination. */
            this->StartTermination();
@@ -1162,14 +1162,14 @@ namespace ams::kern {
        const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool {
            /* Perform an atomic compare-and-swap from false to true. */
            bool expected = false;
-            return this->termination_requested.compare_exchange_strong(expected, true);
+            return m_termination_requested.compare_exchange_strong(expected, true);
        }();

        /* If this is the first request, start termination procedure. */
        if (first_request) {
            /* If the thread is in initialized state, just change state to terminated. */
            if (this->GetState() == ThreadState_Initialized) {
-                this->thread_state = ThreadState_Terminated;
+                m_thread_state = ThreadState_Terminated;
                return ThreadState_Terminated;
            }

@@ -1178,7 +1178,7 @@ namespace ams::kern {

            /* If the thread is suspended, continue it. */
            if (this->IsSuspended()) {
-                this->suspend_allowed_flags = 0;
+                m_suspend_allowed_flags = 0;
                this->Continue();
            }

@@ -1189,7 +1189,7 @@ namespace ams::kern {

            /* If the thread is runnable, send a termination interrupt to other cores. */
            if (this->GetState() == ThreadState_Runnable) {
-                if (const u64 core_mask = this->physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
+                if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
                    cpu::DataSynchronizationBarrier();
                    Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
                }
@@ -1237,9 +1237,9 @@ namespace ams::kern {

        KScopedSchedulerLock sl;

-        const ThreadState old_state = this->thread_state;
-        this->thread_state = static_cast<ThreadState>((old_state & ~ThreadState_Mask) | (state & ThreadState_Mask));
-        if (this->thread_state != old_state) {
+        const ThreadState old_state = m_thread_state;
+        m_thread_state = static_cast<ThreadState>((old_state & ~ThreadState_Mask) | (state & ThreadState_Mask));
+        if (m_thread_state != old_state) {
            KScheduler::OnThreadStateChanged(this, old_state);
        }
    }
@@ -1256,10 +1256,10 @@ namespace ams::kern {
        /* Define helper object to find the thread. */
        class IdObjectHelper : public KAutoObjectWithListContainer::ListType::value_type {
            private:
-                u64 id;
+                u64 m_id;
            public:
-                constexpr explicit IdObjectHelper(u64 id) : id(id) { /* ... */ }
-                virtual u64 GetId() const override { return this->id; }
+                constexpr explicit IdObjectHelper(u64 id) : m_id(id) { /* ... */ }
+                virtual u64 GetId() const override { return m_id; }
        };

        /* Find the object with the right id. */
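The first_request lambda above uses compare_exchange_strong to flip m_termination_requested from false to true exactly once, so only the first requester runs the termination procedure even if several threads race. The core of the idiom:

    #include <atomic>

    class Terminatable {
        private:
            std::atomic<bool> m_termination_requested{false};
        public:
            /* Returns true for exactly one caller, no matter how many race. */
            bool NoteTerminationRequest() {
                bool expected = false;
                return m_termination_requested.compare_exchange_strong(expected, true);
            }
    };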
diff --git a/libraries/libmesosphere/source/kern_k_transfer_memory.cpp b/libraries/libmesosphere/source/kern_k_transfer_memory.cpp
index 39a3e197d..283383a87 100644
--- a/libraries/libmesosphere/source/kern_k_transfer_memory.cpp
+++ b/libraries/libmesosphere/source/kern_k_transfer_memory.cpp
@@ -21,24 +21,24 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Set members. */
-        this->owner = GetCurrentProcessPointer();
+        m_owner = GetCurrentProcessPointer();

         /* Initialize the page group. */
-        auto &page_table = this->owner->GetPageTable();
-        new (GetPointer(this->page_group)) KPageGroup(page_table.GetBlockInfoManager());
+        auto &page_table = m_owner->GetPageTable();
+        new (GetPointer(m_page_group)) KPageGroup(page_table.GetBlockInfoManager());

         /* Ensure that our page group's state is valid on exit. */
-        auto pg_guard = SCOPE_GUARD { GetReference(this->page_group).~KPageGroup(); };
+        auto pg_guard = SCOPE_GUARD { GetReference(m_page_group).~KPageGroup(); };

         /* Lock the memory. */
-        R_TRY(page_table.LockForTransferMemory(GetPointer(this->page_group), addr, size, ConvertToKMemoryPermission(own_perm)));
+        R_TRY(page_table.LockForTransferMemory(GetPointer(m_page_group), addr, size, ConvertToKMemoryPermission(own_perm)));

         /* Set remaining tracking members. */
-        this->owner->Open();
-        this->owner_perm = own_perm;
-        this->address = addr;
-        this->is_initialized = true;
-        this->is_mapped = false;
+        m_owner->Open();
+        m_owner_perm = own_perm;
+        m_address = addr;
+        m_is_initialized = true;
+        m_is_mapped = false;

         /* We succeeded. */
         pg_guard.Cancel();
@@ -49,14 +49,14 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Unlock. */
-        if (!this->is_mapped) {
-            const size_t size = GetReference(this->page_group).GetNumPages() * PageSize;
-            MESOSPHERE_R_ABORT_UNLESS(this->owner->GetPageTable().UnlockForTransferMemory(this->address, size, GetReference(this->page_group)));
+        if (!m_is_mapped) {
+            const size_t size = GetReference(m_page_group).GetNumPages() * PageSize;
+            MESOSPHERE_R_ABORT_UNLESS(m_owner->GetPageTable().UnlockForTransferMemory(m_address, size, GetReference(m_page_group)));
         }

         /* Close the page group. */
-        GetReference(this->page_group).Close();
-        GetReference(this->page_group).Finalize();
+        GetReference(m_page_group).Close();
+        GetReference(m_page_group).Finalize();

         /* Perform inherited finalization. */
         KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList>::Finalize();
@@ -72,23 +72,23 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Validate the size. */
-        R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+        R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

         /* Validate the permission. */
-        R_UNLESS(this->owner_perm == map_perm, svc::ResultInvalidState());
+        R_UNLESS(m_owner_perm == map_perm, svc::ResultInvalidState());

         /* Lock ourselves. */
-        KScopedLightLock lk(this->lock);
+        KScopedLightLock lk(m_lock);

         /* Ensure we're not already mapped. */
-        R_UNLESS(!this->is_mapped, svc::ResultInvalidState());
+        R_UNLESS(!m_is_mapped, svc::ResultInvalidState());

         /* Map the memory. */
-        const KMemoryState state = (this->owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered;
-        R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), state, KMemoryPermission_UserReadWrite));
+        const KMemoryState state = (m_owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered;
+        R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(m_page_group), state, KMemoryPermission_UserReadWrite));

         /* Mark ourselves as mapped. */
-        this->is_mapped = true;
+        m_is_mapped = true;

         return ResultSuccess();
     }
@@ -97,18 +97,18 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Validate the size. */
-        R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
+        R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

         /* Lock ourselves. */
-        KScopedLightLock lk(this->lock);
+        KScopedLightLock lk(m_lock);

         /* Unmap the memory. */
-        const KMemoryState state = (this->owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered;
-        R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), state));
+        const KMemoryState state = (m_owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered;
+        R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), state));

         /* Mark ourselves as unmapped. */
-        MESOSPHERE_ASSERT(this->is_mapped);
-        this->is_mapped = false;
+        MESOSPHERE_ASSERT(m_is_mapped);
+        m_is_mapped = false;

         return ResultSuccess();
     }
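Note on kern_k_transfer_memory.cpp: Initialize() leans on the SCOPE_GUARD plus Cancel() idiom so that the page group is torn down on every failure path and kept only once locking succeeds. Roughly how such a cancelable guard can be built (a sketch, not Atmosphère's actual SCOPE_GUARD implementation):

    #include <cstdio>
    #include <utility>

    /* A cancelable guard: runs the cleanup functor on scope exit unless canceled. */
    template<typename F>
    class ScopeGuard {
        private:
            F m_f;
            bool m_active;
        public:
            explicit ScopeGuard(F f) : m_f(std::move(f)), m_active(true) { /* ... */ }
            ~ScopeGuard() { if (m_active) { m_f(); } }
            void Cancel() { m_active = false; }

            ScopeGuard(const ScopeGuard &) = delete;
            ScopeGuard &operator=(const ScopeGuard &) = delete;
    };

    bool Initialize(bool fail) {
        std::puts("acquire resource");
        ScopeGuard guard([] { std::puts("rollback"); });

        /* Any early return before Cancel() triggers the rollback. */
        if (fail) {
            return false;
        }

        /* Success: keep the resource. */
        guard.Cancel();
        return true;
    }

The same shape appears in KThreadLocalPage::Initialize above with page_buf_guard.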
diff --git a/libraries/libmesosphere/source/kern_k_wait_object.cpp b/libraries/libmesosphere/source/kern_k_wait_object.cpp
index 36f21c0b2..f0b18d5b0 100644
--- a/libraries/libmesosphere/source/kern_k_wait_object.cpp
+++ b/libraries/libmesosphere/source/kern_k_wait_object.cpp
@@ -21,7 +21,7 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

         /* Wake up all the waiting threads. */
-        for (KThread &thread : this->wait_list) {
+        for (KThread &thread : m_wait_list) {
             thread.Wakeup();
         }
     }
@@ -38,19 +38,19 @@ namespace ams::kern {

             /* Verify that nothing else is already waiting on the object. */
             if (timeout > 0) {
-                R_UNLESS(!this->timer_used, svc::ResultBusy());
+                R_UNLESS(!m_timer_used, svc::ResultBusy());
             }

             /* Check that we're not already in use. */
             if (timeout >= 0) {
                 /* Verify the timer isn't already in use. */
-                R_UNLESS(!this->timer_used, svc::ResultBusy());
+                R_UNLESS(!m_timer_used, svc::ResultBusy());
             }

             /* If we need to, register our timeout. */
             if (timeout > 0) {
                 /* Mark that we're using the timer. */
-                this->timer_used = true;
+                m_timer_used = true;

                 /* Use the timer. */
                 timer = std::addressof(Kernel::GetHardwareTimer());
@@ -62,7 +62,7 @@ namespace ams::kern {
                 this->OnTimer();
             } else {
                 /* Otherwise, sleep until the timeout occurs. */
-                this->wait_list.push_back(GetCurrentThread());
+                m_wait_list.push_back(GetCurrentThread());

                 cur_thread->SetState(KThread::ThreadState_Waiting);
                 cur_thread->SetSyncedObject(nullptr, svc::ResultTimedOut());
             }
@@ -74,15 +74,15 @@ namespace ams::kern {

             /* Remove from the timer. */
             if (timeout > 0) {
-                MESOSPHERE_ASSERT(this->timer_used);
+                MESOSPHERE_ASSERT(m_timer_used);
                 MESOSPHERE_ASSERT(timer != nullptr);
                 timer->CancelTask(this);
-                this->timer_used = false;
+                m_timer_used = false;
             }

             /* Remove the thread from our queue. */
             if (timeout != 0) {
-                this->wait_list.erase(this->wait_list.iterator_to(GetCurrentThread()));
+                m_wait_list.erase(m_wait_list.iterator_to(GetCurrentThread()));
             }
         }
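Note on kern_k_wait_object.cpp: the branches in Synchronize imply a three-way timeout convention. Negative waits forever, zero fires OnTimer() immediately without queueing, and positive registers a hardware-timer task that is canceled on wake. Captured as a small sketch (the names here are hypothetical, not kernel API):

    #include <cstdint>

    /* Hypothetical names; the convention is inferred from the branches above. */
    enum class WaitKind { Forever, Poll, Timed };

    constexpr WaitKind ClassifyTimeout(std::int64_t timeout) {
        if (timeout < 0) {
            return WaitKind::Forever; /* Queue the thread; no timer task. */
        } else if (timeout == 0) {
            return WaitKind::Poll;    /* OnTimer() runs immediately; never queued. */
        } else {
            return WaitKind::Timed;   /* Timer registered now, canceled after wake. */
        }
    }

    static_assert(ClassifyTimeout(-1) == WaitKind::Forever);
    static_assert(ClassifyTimeout( 0) == WaitKind::Poll);
    static_assert(ClassifyTimeout(10) == WaitKind::Timed);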
diff --git a/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp b/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp
index 5a156cd53..f2ddc9953 100644
--- a/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp
@@ -19,23 +19,23 @@ namespace ams::kern {
     void KWorkerTaskManager::Initialize(WorkerType wt, s32 priority) {
         /* Set type, other members already initialized in constructor. */
-        this->type = wt;
+        m_type = wt;

         /* Reserve a thread from the system limit. */
         MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));

         /* Create a new thread. */
-        this->thread = KThread::Create();
-        MESOSPHERE_ABORT_UNLESS(this->thread != nullptr);
+        m_thread = KThread::Create();
+        MESOSPHERE_ABORT_UNLESS(m_thread != nullptr);

         /* Launch the new thread. */
-        MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(this->thread, ThreadFunction, reinterpret_cast<uintptr_t>(this), priority, cpu::NumCores - 1));
+        MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(m_thread, ThreadFunction, reinterpret_cast<uintptr_t>(this), priority, cpu::NumCores - 1));

         /* Register the new thread. */
-        KThread::Register(this->thread);
+        KThread::Register(m_thread);

         /* Run the thread. */
-        this->thread->Run();
+        m_thread->Run();
     }

     void KWorkerTaskManager::AddTask(WorkerType type, KWorkerTask *task) {
@@ -58,12 +58,12 @@ namespace ams::kern {
                 if (task == nullptr) {
                     /* If there's nothing to do, set ourselves as waiting. */
-                    this->active = false;
-                    this->thread->SetState(KThread::ThreadState_Waiting);
+                    m_active = false;
+                    m_thread->SetState(KThread::ThreadState_Waiting);
                     continue;
                 }

-                this->active = true;
+                m_active = true;
             }

             /* Do the task. */
@@ -73,14 +73,14 @@ namespace ams::kern {
     KWorkerTask *KWorkerTaskManager::GetTask() {
         MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
-        KWorkerTask *next = this->head_task;
+        KWorkerTask *next = m_head_task;
         if (next) {
             /* Advance the list. */
-            if (this->head_task == this->tail_task) {
-                this->head_task = nullptr;
-                this->tail_task = nullptr;
+            if (m_head_task == m_tail_task) {
+                m_head_task = nullptr;
+                m_tail_task = nullptr;
             } else {
-                this->head_task = this->head_task->GetNextTask();
+                m_head_task = m_head_task->GetNextTask();
             }

             /* Clear the next task's next. */
@@ -94,16 +94,16 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(task->GetNextTask() == nullptr);

         /* Insert the task. */
-        if (this->tail_task) {
-            this->tail_task->SetNextTask(task);
-            this->tail_task = task;
+        if (m_tail_task) {
+            m_tail_task->SetNextTask(task);
+            m_tail_task = task;
         } else {
-            this->head_task = task;
-            this->tail_task = task;
+            m_head_task = task;
+            m_tail_task = task;

             /* Make ourselves active if we need to. */
-            if (!this->active) {
-                this->thread->SetState(KThread::ThreadState_Runnable);
+            if (!m_active) {
+                m_thread->SetState(KThread::ThreadState_Runnable);
             }
         }
     }
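Note on kern_k_worker_task_manager.cpp: tasks are queued on an intrusive singly-linked list with head and tail pointers, so AddTask and GetTask are O(1) and allocation-free, and the worker thread is only made runnable when the queue goes from empty to non-empty. A minimal sketch of that queue (illustrative names, not the kernel's):

    #include <cassert>

    struct Task {
        Task *next = nullptr; /* Intrusive link, like KWorkerTask's next-task pointer. */
    };

    class TaskQueue {
        private:
            Task *m_head = nullptr;
            Task *m_tail = nullptr;
        public:
            /* O(1) append; returns true when the queue was empty, which is the */
            /* moment KWorkerTaskManager wakes its worker thread. */
            bool Push(Task *t) {
                assert(t->next == nullptr);
                if (m_tail != nullptr) {
                    m_tail->next = t;
                    m_tail = t;
                    return false;
                } else {
                    m_head = t;
                    m_tail = t;
                    return true;
                }
            }

            /* O(1) pop; detaches the task as GetTask() does before running it. */
            Task *Pop() {
                Task *t = m_head;
                if (t != nullptr) {
                    if (m_head == m_tail) {
                        m_head = nullptr;
                        m_tail = nullptr;
                    } else {
                        m_head = t->next;
                    }
                    t->next = nullptr;
                }
                return t;
            }
    };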
diff --git a/libraries/libmesosphere/source/kern_k_writable_event.cpp b/libraries/libmesosphere/source/kern_k_writable_event.cpp
index 76950456a..6dbd5ba51 100644
--- a/libraries/libmesosphere/source/kern_k_writable_event.cpp
+++ b/libraries/libmesosphere/source/kern_k_writable_event.cpp
@@ -19,22 +19,22 @@ namespace ams::kern {
     void KWritableEvent::Initialize(KEvent *p) {
         /* Set parent, open a reference to the readable event. */
-        this->parent = p;
-        this->parent->GetReadableEvent().Open();
+        m_parent = p;
+        m_parent->GetReadableEvent().Open();
     }

     Result KWritableEvent::Signal() {
-        return this->parent->GetReadableEvent().Signal();
+        return m_parent->GetReadableEvent().Signal();
     }

     Result KWritableEvent::Clear() {
-        return this->parent->GetReadableEvent().Clear();
+        return m_parent->GetReadableEvent().Clear();
     }

     void KWritableEvent::Destroy() {
         /* Close our references. */
-        this->parent->GetReadableEvent().Close();
-        this->parent->Close();
+        m_parent->GetReadableEvent().Close();
+        m_parent->Close();
     }

 }
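Note on kern_k_writable_event.cpp: the writable half of a KEvent owns no state of its own. It forwards Signal and Clear to the readable half and holds references (Open/Close) so the readable event outlives it. A rough sketch of that split (illustrative only, with a manual refcount standing in for KAutoObject's reference counting):

    #include <cstdio>

    /* Manual refcount stands in for KAutoObject's Open()/Close(). */
    class ReadableEvent {
        private:
            int  m_refs;
            bool m_signaled;
        public:
            ReadableEvent() : m_refs(1), m_signaled(false) { /* ... */ }
            void Open()  { ++m_refs; }
            void Close() { if (--m_refs == 0) { std::puts("readable event destroyed"); } }
            void Signal() { m_signaled = true; }
            void Clear()  { m_signaled = false; }
    };

    /* The writable half only forwards; the readable half owns the state. */
    class WritableEvent {
        private:
            ReadableEvent *m_readable = nullptr;
        public:
            void Initialize(ReadableEvent *r) {
                m_readable = r;
                m_readable->Open(); /* Hold a reference for our lifetime. */
            }
            void Signal()  { m_readable->Signal(); }
            void Clear()   { m_readable->Clear(); }
            void Destroy() { m_readable->Close(); }
    };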