kern/util: use custom atomics wrapper to substantially improve codegen

Michael Scire 2021-10-19 15:24:15 -07:00
parent 52332e8d75
commit d74f364107
26 changed files with 688 additions and 260 deletions

View file

@ -142,8 +142,6 @@ $(OFILES_SRC) : $(HFILES_BIN)
kern_libc_generic.o: CFLAGS += -fno-builtin kern_libc_generic.o: CFLAGS += -fno-builtin
kern_k_auto_object.o kern_k_debug_base_process_holder.o: CXXFLAGS += -fno-lto
#--------------------------------------------------------------------------------- #---------------------------------------------------------------------------------
%_bin.h %.bin.o : %.bin %_bin.h %.bin.o : %.bin
#--------------------------------------------------------------------------------- #---------------------------------------------------------------------------------

View file

@ -41,8 +41,48 @@ namespace ams::kern {
virtual const char *GetTypeName() { return GetStaticTypeName(); } \ virtual const char *GetTypeName() { return GetStaticTypeName(); } \
private: private:
class KAutoObject { class KAutoObject {
public:
class ReferenceCount {
NON_COPYABLE(ReferenceCount);
NON_MOVEABLE(ReferenceCount);
private:
using Storage = u32;
private:
util::Atomic<Storage> m_value;
public:
ALWAYS_INLINE explicit ReferenceCount() { /* ... */ }
constexpr ALWAYS_INLINE explicit ReferenceCount(Storage v) : m_value(v) { /* ... */ }
ALWAYS_INLINE void operator=(Storage v) { m_value = v; }
ALWAYS_INLINE Storage GetValue() const { return m_value.Load(); }
ALWAYS_INLINE bool Open() {
/* Atomically increment the reference count, only if it's positive. */
u32 cur = m_value.Load<std::memory_order_relaxed>();
do {
if (AMS_UNLIKELY(cur == 0)) {
MESOSPHERE_AUDIT(cur != 0);
return false;
}
MESOSPHERE_ABORT_UNLESS(cur < cur + 1);
} while (AMS_UNLIKELY(!m_value.CompareExchangeWeak<std::memory_order_relaxed>(cur, cur + 1)));
return true;
}
ALWAYS_INLINE bool Close() {
/* Atomically decrement the reference count, not allowing it to become negative. */
u32 cur = m_value.Load<std::memory_order_relaxed>();
do {
MESOSPHERE_ABORT_UNLESS(cur > 0);
} while (AMS_UNLIKELY(!m_value.CompareExchangeWeak<std::memory_order_relaxed>(cur, cur - 1)));
/* Return whether the object was closed. */
return cur - 1 == 0;
}
};
protected: protected:
class TypeObj { class TypeObj {
private: private:
@ -74,7 +114,7 @@ namespace ams::kern {
MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject); MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
private: private:
KAutoObject *m_next_closed_object; KAutoObject *m_next_closed_object;
std::atomic<u32> m_ref_count; ReferenceCount m_ref_count;
#if defined(MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST) #if defined(MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST)
ClassTokenType m_class_token; ClassTokenType m_class_token;
#endif #endif
@ -98,7 +138,7 @@ namespace ams::kern {
virtual KProcess *GetOwner() const { return nullptr; } virtual KProcess *GetOwner() const { return nullptr; }
u32 GetReferenceCount() const { u32 GetReferenceCount() const {
return m_ref_count.load(); return m_ref_count.GetValue();
} }
ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const { ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const {
@ -141,8 +181,19 @@ namespace ams::kern {
} }
} }
bool Open(); NOINLINE bool Open() {
void Close(); MESOSPHERE_ASSERT_THIS();
return m_ref_count.Open();
}
NOINLINE void Close() {
MESOSPHERE_ASSERT_THIS();
if (m_ref_count.Close()) {
this->ScheduleDestruction();
}
}
private: private:
/* NOTE: This has to be defined *after* KThread is defined. */ /* NOTE: This has to be defined *after* KThread is defined. */
/* Nintendo seems to handle this by defining Open/Close() in a cpp, but we'd like them to remain in headers. */ /* Nintendo seems to handle this by defining Open/Close() in a cpp, but we'd like them to remain in headers. */
@ -256,5 +307,52 @@ namespace ams::kern {
constexpr ALWAYS_INLINE bool IsNotNull() const { return m_obj != nullptr; } constexpr ALWAYS_INLINE bool IsNotNull() const { return m_obj != nullptr; }
}; };
template<typename T> requires std::derived_from<T, KAutoObject>
class KSharedAutoObject {
private:
T *m_object;
KAutoObject::ReferenceCount m_ref_count;
public:
explicit KSharedAutoObject() : m_object(nullptr) { /* ... */ }
void Attach(T *obj) {
MESOSPHERE_ASSERT(m_object == nullptr);
/* Set our object. */
m_object = obj;
/* Open reference to our object. */
m_object->Open();
/* Set our reference count. */
m_ref_count = 1;
}
bool Open() {
return m_ref_count.Open();
}
void Close() {
if (m_ref_count.Close()) {
this->Detach();
}
}
ALWAYS_INLINE T *Get() const {
return m_object;
}
private:
void Detach() {
/* Close our object, if we have one. */
if (T * const object = m_object; AMS_LIKELY(object != nullptr)) {
/* Set our object to a debug sentinel value, which will cause crash if accessed. */
m_object = reinterpret_cast<T *>(1);
/* Close reference to our object. */
object->Close();
}
}
};
} }
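
The new ReferenceCount helper centralizes a pattern this commit previously open-coded twice (in KAutoObject::Open/Close and in KDebugBase's ProcessHolder, both removed further down): increment the count only while it is still positive, and report on decrement whether the caller performed the final release. KSharedAutoObject<T> then generalizes the old ProcessHolder into a reusable holder that owns one reference to a KAutoObject and counts its own users. Below is a minimal standalone sketch of the same pattern over std::atomic, so it can be compiled and tested outside the kernel; the names here are ours, not Atmosphère's.

/* Illustrative sketch of the open/close pattern used by ReferenceCount above. */
#include <atomic>
#include <cassert>
#include <cstdint>

class RefCount {
    private:
        std::atomic<uint32_t> m_value{1};
    public:
        /* Increment only while the count is still positive; a zero count means
           the object is already being destroyed and must not be revived. */
        bool TryOpen() {
            uint32_t cur = m_value.load(std::memory_order_relaxed);
            do {
                if (cur == 0) {
                    return false;
                }
            } while (!m_value.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
            return true;
        }

        /* Decrement; returns true exactly once, when the count hits zero, so the
           caller knows it is responsible for scheduling destruction. */
        bool Close() {
            uint32_t cur = m_value.load(std::memory_order_relaxed);
            do {
                assert(cur > 0); /* closing an already-dead object is a bug */
            } while (!m_value.compare_exchange_weak(cur, cur - 1, std::memory_order_relaxed));
            return cur == 1;
        }
};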

View file

@ -28,12 +28,12 @@ namespace ams::kern {
class KClientPort final : public KSynchronizationObject { class KClientPort final : public KSynchronizationObject {
MESOSPHERE_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject); MESOSPHERE_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
private: private:
std::atomic<s32> m_num_sessions; util::Atomic<s32> m_num_sessions;
std::atomic<s32> m_peak_sessions; util::Atomic<s32> m_peak_sessions;
s32 m_max_sessions; s32 m_max_sessions;
KPort *m_parent; KPort *m_parent;
public: public:
constexpr KClientPort() : m_num_sessions(), m_peak_sessions(), m_max_sessions(), m_parent() { /* ... */ } constexpr KClientPort() : m_num_sessions(0), m_peak_sessions(0), m_max_sessions(), m_parent() { /* ... */ }
void Initialize(KPort *parent, s32 max_sessions); void Initialize(KPort *parent, s32 max_sessions);
void OnSessionFinalized(); void OnSessionFinalized();
@ -41,8 +41,8 @@ namespace ams::kern {
constexpr const KPort *GetParent() const { return m_parent; } constexpr const KPort *GetParent() const { return m_parent; }
ALWAYS_INLINE s32 GetNumSessions() const { return m_num_sessions; } ALWAYS_INLINE s32 GetNumSessions() const { return m_num_sessions.Load(); }
ALWAYS_INLINE s32 GetPeakSessions() const { return m_peak_sessions; } ALWAYS_INLINE s32 GetPeakSessions() const { return m_peak_sessions.Load(); }
ALWAYS_INLINE s32 GetMaxSessions() const { return m_max_sessions; } ALWAYS_INLINE s32 GetMaxSessions() const { return m_max_sessions; }
bool IsLight() const; bool IsLight() const;

View file

@ -25,26 +25,10 @@ namespace ams::kern {
class KDebugBase : public KSynchronizationObject { class KDebugBase : public KSynchronizationObject {
protected: protected:
using DebugEventList = util::IntrusiveListBaseTraits<KEventInfo>::ListType; using DebugEventList = util::IntrusiveListBaseTraits<KEventInfo>::ListType;
private:
class ProcessHolder {
private:
friend class KDebugBase;
private:
KProcess *m_process;
std::atomic<u32> m_ref_count;
private:
explicit ProcessHolder() : m_process(nullptr) { /* ... */ }
void Attach(KProcess *process);
void Detach();
bool Open();
void Close();
};
private: private:
DebugEventList m_event_info_list; DebugEventList m_event_info_list;
u32 m_continue_flags; u32 m_continue_flags;
ProcessHolder m_process_holder; KSharedAutoObject<KProcess> m_process_holder;
KLightLock m_lock; KLightLock m_lock;
KProcess::State m_old_process_state; KProcess::State m_old_process_state;
bool m_is_attached; bool m_is_attached;
@ -89,7 +73,7 @@ namespace ams::kern {
} }
ALWAYS_INLINE KProcess *GetProcessUnsafe() const { ALWAYS_INLINE KProcess *GetProcessUnsafe() const {
return m_process_holder.m_process; return m_process_holder.Get();
} }
private: private:
void PushDebugEvent(ams::svc::DebugEvent event, uintptr_t param0 = 0, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0); void PushDebugEvent(ams::svc::DebugEvent event, uintptr_t param0 = 0, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0);

View file

@ -29,9 +29,9 @@ namespace ams::kern {
private: private:
using PageBuffer = KDynamicPageManager::PageBuffer; using PageBuffer = KDynamicPageManager::PageBuffer;
private: private:
std::atomic<size_t> m_used{}; util::Atomic<size_t> m_used{0};
std::atomic<size_t> m_peak{}; util::Atomic<size_t> m_peak{0};
std::atomic<size_t> m_count{}; util::Atomic<size_t> m_count{0};
KVirtualAddress m_address{}; KVirtualAddress m_address{};
size_t m_size{}; size_t m_size{};
public: public:
@ -39,9 +39,9 @@ namespace ams::kern {
constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return m_address; } constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return m_address; }
constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; } constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
constexpr ALWAYS_INLINE size_t GetUsed() const { return m_used.load(); } constexpr ALWAYS_INLINE size_t GetUsed() const { return m_used.Load(); }
constexpr ALWAYS_INLINE size_t GetPeak() const { return m_peak.load(); } constexpr ALWAYS_INLINE size_t GetPeak() const { return m_peak.Load(); }
constexpr ALWAYS_INLINE size_t GetCount() const { return m_count.load(); } constexpr ALWAYS_INLINE size_t GetCount() const { return m_count.Load(); }
constexpr ALWAYS_INLINE bool IsInRange(KVirtualAddress addr) const { constexpr ALWAYS_INLINE bool IsInRange(KVirtualAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1; return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
@ -58,7 +58,7 @@ namespace ams::kern {
KSlabHeapImpl::Initialize(); KSlabHeapImpl::Initialize();
/* Allocate until we have the correct number of objects. */ /* Allocate until we have the correct number of objects. */
while (m_count.load() < num_objects) { while (m_count.Load() < num_objects) {
auto *allocated = reinterpret_cast<T *>(page_allocator->Allocate()); auto *allocated = reinterpret_cast<T *>(page_allocator->Allocate());
MESOSPHERE_ABORT_UNLESS(allocated != nullptr); MESOSPHERE_ABORT_UNLESS(allocated != nullptr);
@ -66,7 +66,7 @@ namespace ams::kern {
KSlabHeapImpl::Free(allocated + i); KSlabHeapImpl::Free(allocated + i);
} }
m_count.fetch_add(sizeof(PageBuffer) / sizeof(T)); m_count.FetchAdd(sizeof(PageBuffer) / sizeof(T));
} }
} }
@ -89,7 +89,7 @@ namespace ams::kern {
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) { for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i); KSlabHeapImpl::Free(allocated + i);
} }
m_count.fetch_add(sizeof(PageBuffer) / sizeof(T)); m_count.FetchAdd(sizeof(PageBuffer) / sizeof(T));
} }
} }
} }
@ -99,10 +99,10 @@ namespace ams::kern {
std::construct_at(allocated); std::construct_at(allocated);
/* Update our tracking. */ /* Update our tracking. */
size_t used = m_used.fetch_add(1) + 1; size_t used = m_used.FetchAdd(1) + 1;
size_t peak = m_peak.load(); size_t peak = m_peak.Load();
while (peak < used) { while (peak < used) {
if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { if (m_peak.CompareExchangeWeak<std::memory_order_relaxed>(peak, used)) {
break; break;
} }
} }
@ -113,7 +113,7 @@ namespace ams::kern {
ALWAYS_INLINE void Free(T *t) { ALWAYS_INLINE void Free(T *t) {
KSlabHeapImpl::Free(t); KSlabHeapImpl::Free(t);
m_used.fetch_sub(1); m_used.FetchSub(1);
} }
}; };
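
The Allocate() path above maintains a purely statistical peak-usage value with a compare-exchange loop: the peak only ever moves upward, and a failed exchange simply picks up the newer value and re-checks. A minimal sketch of that store-max idiom follows; the function name is ours, and relaxed ordering suffices because the value is only a statistic.

#include <atomic>
#include <cstddef>

inline void UpdatePeak(std::atomic<std::size_t> &peak, std::size_t observed) {
    std::size_t cur = peak.load(std::memory_order_relaxed);
    while (cur < observed) {
        if (peak.compare_exchange_weak(cur, observed, std::memory_order_relaxed)) {
            break;
        }
        /* on failure, cur now holds the newer value and the loop re-checks it */
    }
}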

View file

@ -23,7 +23,7 @@ namespace ams::kern {
class KLightLock { class KLightLock {
private: private:
std::atomic<uintptr_t> m_tag; util::Atomic<uintptr_t> m_tag;
public: public:
constexpr KLightLock() : m_tag(0) { /* ... */ } constexpr KLightLock() : m_tag(0) { /* ... */ }
@ -31,12 +31,11 @@ namespace ams::kern {
MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT_THIS();
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()); const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
const uintptr_t cur_thread_tag = (cur_thread | 1);
while (true) { while (true) {
uintptr_t old_tag = m_tag.load(std::memory_order_relaxed); uintptr_t old_tag = m_tag.Load<std::memory_order_relaxed>();
while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { while (!m_tag.CompareExchangeWeak<std::memory_order_acquire>(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1))) {
/* ... */ /* ... */
} }
@ -52,7 +51,7 @@ namespace ams::kern {
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()); const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
uintptr_t expected = cur_thread; uintptr_t expected = cur_thread;
if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) { if (!m_tag.CompareExchangeStrong<std::memory_order_release>(expected, 0)) {
this->UnlockSlowPath(cur_thread); this->UnlockSlowPath(cur_thread);
} }
} }
@ -60,8 +59,8 @@ namespace ams::kern {
bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread); bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
void UnlockSlowPath(uintptr_t cur_thread); void UnlockSlowPath(uintptr_t cur_thread);
ALWAYS_INLINE bool IsLocked() const { return m_tag.load() != 0; } ALWAYS_INLINE bool IsLocked() const { return m_tag.Load() != 0; }
ALWAYS_INLINE bool IsLockedByCurrentThread() const { return (m_tag.load() | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); } ALWAYS_INLINE bool IsLockedByCurrentThread() const { return (m_tag.Load() | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
}; };
using KScopedLightLock = KScopedLock<KLightLock>; using KScopedLightLock = KScopedLock<KLightLock>;
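
KLightLock keeps its whole state in a single word, which is what lets the fast paths above be one compare-exchange each: a tag of 0 means unlocked, an aligned KThread pointer means held with no waiters, and the low bit is set once another thread queues, telling Unlock() it must take the slow path and wake a waiter. Below is a sketch of just the fast paths under that encoding, over std::atomic<uintptr_t>; the helper names are ours, and the real Lock() loops and then falls back to LockSlowPath() instead of giving up.

#include <atomic>
#include <cstdint>

/* Thread objects are aligned, so bit 0 of the tag is free to mark contention. */
constexpr uintptr_t kUnlocked  = 0;
constexpr uintptr_t kContended = 1;

inline bool IsContended(uintptr_t tag) {
    return (tag & kContended) != 0;
}

/* Fast path of Lock(): succeeds only when nobody holds the lock. */
inline bool TryLockFast(std::atomic<uintptr_t> &tag, uintptr_t cur_thread) {
    uintptr_t expected = kUnlocked;
    return tag.compare_exchange_strong(expected, cur_thread, std::memory_order_acquire);
}

/* Fast path of Unlock(): only matches when the contended bit is clear; otherwise
   the slow path must hand the lock to the next waiter. */
inline bool UnlockFast(std::atomic<uintptr_t> &tag, uintptr_t cur_thread) {
    uintptr_t expected = cur_thread;
    return tag.compare_exchange_strong(expected, kUnlocked, std::memory_order_release);
}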

View file

@ -58,7 +58,7 @@ namespace ams::kern {
using TLPIterator = TLPTree::iterator; using TLPIterator = TLPTree::iterator;
private: private:
KProcessPageTable m_page_table{}; KProcessPageTable m_page_table{};
std::atomic<size_t> m_used_kernel_memory_size{}; util::Atomic<size_t> m_used_kernel_memory_size{0};
TLPTree m_fully_used_tlp_tree{}; TLPTree m_fully_used_tlp_tree{};
TLPTree m_partially_used_tlp_tree{}; TLPTree m_partially_used_tlp_tree{};
s32 m_ideal_core_id{}; s32 m_ideal_core_id{};
@ -77,7 +77,7 @@ namespace ams::kern {
bool m_is_initialized{}; bool m_is_initialized{};
bool m_is_application{}; bool m_is_application{};
char m_name[13]{}; char m_name[13]{};
std::atomic<u16> m_num_running_threads{}; util::Atomic<u16> m_num_running_threads{0};
u32 m_flags{}; u32 m_flags{};
KMemoryManager::Pool m_memory_pool{}; KMemoryManager::Pool m_memory_pool{};
s64 m_schedule_count{}; s64 m_schedule_count{};
@ -109,14 +109,14 @@ namespace ams::kern {
KThread *m_running_threads[cpu::NumCores]{}; KThread *m_running_threads[cpu::NumCores]{};
u64 m_running_thread_idle_counts[cpu::NumCores]{}; u64 m_running_thread_idle_counts[cpu::NumCores]{};
KThread *m_pinned_threads[cpu::NumCores]{}; KThread *m_pinned_threads[cpu::NumCores]{};
std::atomic<s64> m_cpu_time{}; util::Atomic<s64> m_cpu_time{0};
std::atomic<s64> m_num_process_switches{}; util::Atomic<s64> m_num_process_switches{0};
std::atomic<s64> m_num_thread_switches{}; util::Atomic<s64> m_num_thread_switches{0};
std::atomic<s64> m_num_fpu_switches{}; util::Atomic<s64> m_num_fpu_switches{0};
std::atomic<s64> m_num_supervisor_calls{}; util::Atomic<s64> m_num_supervisor_calls{0};
std::atomic<s64> m_num_ipc_messages{}; util::Atomic<s64> m_num_ipc_messages{0};
std::atomic<s64> m_num_ipc_replies{}; util::Atomic<s64> m_num_ipc_replies{0};
std::atomic<s64> m_num_ipc_receives{}; util::Atomic<s64> m_num_ipc_receives{0};
KDynamicPageManager m_dynamic_page_manager{}; KDynamicPageManager m_dynamic_page_manager{};
KMemoryBlockSlabManager m_memory_block_slab_manager{}; KMemoryBlockSlabManager m_memory_block_slab_manager{};
KBlockInfoManager m_block_info_manager{}; KBlockInfoManager m_block_info_manager{};
@ -288,8 +288,8 @@ namespace ams::kern {
KThread *GetExceptionThread() const { return m_exception_thread; } KThread *GetExceptionThread() const { return m_exception_thread; }
void AddCpuTime(s64 diff) { m_cpu_time += diff; } void AddCpuTime(s64 diff) { m_cpu_time.FetchAdd(diff); }
s64 GetCpuTime() { return m_cpu_time; } s64 GetCpuTime() { return m_cpu_time.Load(); }
constexpr s64 GetScheduledCount() const { return m_schedule_count; } constexpr s64 GetScheduledCount() const { return m_schedule_count; }
void IncrementScheduledCount() { ++m_schedule_count; } void IncrementScheduledCount() { ++m_schedule_count; }

View file

@ -39,14 +39,16 @@ namespace ams::kern {
static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority); static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);
struct SchedulingState { struct SchedulingState {
std::atomic<u8> needs_scheduling; util::Atomic<u8> needs_scheduling{false};
bool interrupt_task_runnable; bool interrupt_task_runnable{false};
bool should_count_idle; bool should_count_idle{false};
u64 idle_count; u64 idle_count{0};
KThread *highest_priority_thread; KThread *highest_priority_thread{nullptr};
void *idle_thread_stack; void *idle_thread_stack{nullptr};
KThread *prev_thread; util::Atomic<KThread *> prev_thread{nullptr};
KInterruptTaskManager *interrupt_task_manager; KInterruptTaskManager *interrupt_task_manager{nullptr};
constexpr SchedulingState() = default;
}; };
private: private:
friend class KScopedSchedulerLock; friend class KScopedSchedulerLock;
@ -58,7 +60,7 @@ namespace ams::kern {
s32 m_core_id; s32 m_core_id;
s64 m_last_context_switch_time; s64 m_last_context_switch_time;
KThread *m_idle_thread; KThread *m_idle_thread;
std::atomic<KThread *> m_current_thread; util::Atomic<KThread *> m_current_thread;
public: public:
constexpr KScheduler() constexpr KScheduler()
: m_state(), m_is_active(false), m_core_id(0), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr) : m_state(), m_is_active(false), m_core_id(0), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
@ -98,11 +100,11 @@ namespace ams::kern {
} }
ALWAYS_INLINE KThread *GetPreviousThread() const { ALWAYS_INLINE KThread *GetPreviousThread() const {
return m_state.prev_thread; return m_state.prev_thread.Load<std::memory_order_relaxed>();
} }
ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const { ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
return m_current_thread; return m_current_thread.Load();
} }
ALWAYS_INLINE s64 GetLastContextSwitchTime() const { ALWAYS_INLINE s64 GetLastContextSwitchTime() const {
@ -182,7 +184,7 @@ namespace ams::kern {
GetCurrentThread().EnableDispatch(); GetCurrentThread().EnableDispatch();
if (m_state.needs_scheduling.load()) { if (m_state.needs_scheduling.Load()) {
/* Disable interrupts, and then check again if rescheduling is needed. */ /* Disable interrupts, and then check again if rescheduling is needed. */
KScopedInterruptDisable intr_disable; KScopedInterruptDisable intr_disable;
@ -192,7 +194,7 @@ namespace ams::kern {
ALWAYS_INLINE void RescheduleCurrentCoreImpl() { ALWAYS_INLINE void RescheduleCurrentCoreImpl() {
/* Check that scheduling is needed. */ /* Check that scheduling is needed. */
if (AMS_LIKELY(m_state.needs_scheduling.load())) { if (AMS_LIKELY(m_state.needs_scheduling.Load())) {
GetCurrentThread().DisableDispatch(); GetCurrentThread().DisableDispatch();
this->Schedule(); this->Schedule();
GetCurrentThread().EnableDispatch(); GetCurrentThread().EnableDispatch();
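
RescheduleCurrentCore() above shows the intended use of the needs_scheduling flag: a cheap Load() on the hot path, and only if it is set are interrupts masked and the flag checked again, so a request that lands between the two checks cannot be lost while the common no-reschedule case stays branch-only. A generic sketch of that double-checked shape follows; the guard type and callable stand in for KScopedInterruptDisable and the scheduler's Schedule().

#include <atomic>
#include <cstdint>

template<typename ScopedInterruptDisable, typename F>
void RescheduleIfNeeded(std::atomic<uint8_t> &needs_scheduling, F &&do_schedule) {
    if (needs_scheduling.load()) {               /* cheap first check */
        ScopedInterruptDisable intr_disable;     /* mask interrupts for the real check */
        if (needs_scheduling.load()) {
            do_schedule();
        }
    }
}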

View file

@ -35,7 +35,7 @@ namespace ams::kern {
ServerClosed = 3, ServerClosed = 3,
}; };
private: private:
std::atomic<std::underlying_type<State>::type> m_atomic_state; util::Atomic<std::underlying_type<State>::type> m_atomic_state;
bool m_initialized; bool m_initialized;
KServerSession m_server; KServerSession m_server;
KClientSession m_client; KClientSession m_client;
@ -48,7 +48,7 @@ namespace ams::kern {
} }
ALWAYS_INLINE State GetState() const { ALWAYS_INLINE State GetState() const {
return static_cast<State>(m_atomic_state.load()); return static_cast<State>(m_atomic_state.Load());
} }
public: public:
constexpr KSession() constexpr KSession()

View file

@ -76,21 +76,18 @@ namespace ams::kern {
NON_MOVEABLE(KSlabHeapBase); NON_MOVEABLE(KSlabHeapBase);
private: private:
size_t m_obj_size{}; size_t m_obj_size{};
uintptr_t m_peak{}; util::Atomic<uintptr_t> m_peak{0};
uintptr_t m_start{}; uintptr_t m_start{};
uintptr_t m_end{}; uintptr_t m_end{};
private: private:
ALWAYS_INLINE void UpdatePeakImpl(uintptr_t obj) { ALWAYS_INLINE void UpdatePeakImpl(uintptr_t obj) {
static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
std::atomic_ref<uintptr_t> peak_ref(m_peak);
const uintptr_t alloc_peak = obj + this->GetObjectSize(); const uintptr_t alloc_peak = obj + this->GetObjectSize();
uintptr_t cur_peak = m_peak; uintptr_t cur_peak = m_peak.Load<std::memory_order_relaxed>();
do { do {
if (alloc_peak <= cur_peak) { if (alloc_peak <= cur_peak) {
break; break;
} }
} while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak)); } while (!m_peak.CompareExchangeStrong(cur_peak, alloc_peak));
} }
public: public:
constexpr KSlabHeapBase() = default; constexpr KSlabHeapBase() = default;
@ -113,7 +110,8 @@ namespace ams::kern {
const size_t num_obj = (memory_size / obj_size); const size_t num_obj = (memory_size / obj_size);
m_start = reinterpret_cast<uintptr_t>(memory); m_start = reinterpret_cast<uintptr_t>(memory);
m_end = m_start + num_obj * obj_size; m_end = m_start + num_obj * obj_size;
m_peak = m_start;
m_peak.Store<std::memory_order_relaxed>(m_start);
/* Free the objects. */ /* Free the objects. */
u8 *cur = reinterpret_cast<u8 *>(m_end); u8 *cur = reinterpret_cast<u8 *>(m_end);
@ -177,7 +175,7 @@ namespace ams::kern {
} }
ALWAYS_INLINE size_t GetPeakIndex() const { ALWAYS_INLINE size_t GetPeakIndex() const {
return this->GetObjectIndex(reinterpret_cast<const void *>(m_peak)); return this->GetObjectIndex(reinterpret_cast<const void *>(m_peak.Load<std::memory_order_relaxed>()));
} }
ALWAYS_INLINE uintptr_t GetSlabHeapAddress() const { ALWAYS_INLINE uintptr_t GetSlabHeapAddress() const {

View file

@ -37,7 +37,7 @@ namespace ams::kern {
virtual void OnFinalizeSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); } virtual void OnFinalizeSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); }
void NotifyAvailable(Result result); void NotifyAvailable(Result result);
void NotifyAvailable() { ALWAYS_INLINE void NotifyAvailable() {
return this->NotifyAvailable(ResultSuccess()); return this->NotifyAvailable(ResultSuccess());
} }
public: public:

View file

@ -89,7 +89,7 @@ namespace ams::kern {
KThreadContext *context; KThreadContext *context;
KThread *cur_thread; KThread *cur_thread;
s16 disable_count; s16 disable_count;
std::atomic<u8> dpc_flags; util::Atomic<u8> dpc_flags;
u8 current_svc_id; u8 current_svc_id;
bool is_calling_svc; bool is_calling_svc;
bool is_in_exception_handler; bool is_in_exception_handler;
@ -177,7 +177,7 @@ namespace ams::kern {
static_assert(ams::util::HasRedBlackKeyType<ConditionVariableComparator>); static_assert(ams::util::HasRedBlackKeyType<ConditionVariableComparator>);
static_assert(std::same_as<ams::util::RedBlackKeyType<ConditionVariableComparator, void>, ConditionVariableComparator::RedBlackKeyType>); static_assert(std::same_as<ams::util::RedBlackKeyType<ConditionVariableComparator, void>, ConditionVariableComparator::RedBlackKeyType>);
private: private:
static constinit inline std::atomic<u64> s_next_thread_id = 0; static constinit inline util::Atomic<u64> s_next_thread_id{0};
private: private:
util::IntrusiveListNode m_process_list_node{}; util::IntrusiveListNode m_process_list_node{};
util::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{}; util::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
@ -192,7 +192,7 @@ namespace ams::kern {
u64 m_virtual_affinity_mask{}; u64 m_virtual_affinity_mask{};
KAffinityMask m_physical_affinity_mask{}; KAffinityMask m_physical_affinity_mask{};
u64 m_thread_id{}; u64 m_thread_id{};
std::atomic<s64> m_cpu_time{}; util::Atomic<s64> m_cpu_time{0};
KProcessAddress m_address_key{}; KProcessAddress m_address_key{};
KProcess *m_parent{}; KProcess *m_parent{};
void *m_kernel_stack_top{}; void *m_kernel_stack_top{};
@ -227,7 +227,7 @@ namespace ams::kern {
s32 m_original_physical_ideal_core_id{}; s32 m_original_physical_ideal_core_id{};
s32 m_num_core_migration_disables{}; s32 m_num_core_migration_disables{};
ThreadState m_thread_state{}; ThreadState m_thread_state{};
std::atomic<u8> m_termination_requested{}; util::Atomic<u8> m_termination_requested{false};
bool m_wait_cancelled{}; bool m_wait_cancelled{};
bool m_cancellable{}; bool m_cancellable{};
bool m_signaled{}; bool m_signaled{};
@ -348,15 +348,15 @@ namespace ams::kern {
#endif #endif
ALWAYS_INLINE void RegisterDpc(DpcFlag flag) { ALWAYS_INLINE void RegisterDpc(DpcFlag flag) {
this->GetStackParameters().dpc_flags.fetch_or(flag); this->GetStackParameters().dpc_flags.FetchOr(flag);
} }
ALWAYS_INLINE void ClearDpc(DpcFlag flag) { ALWAYS_INLINE void ClearDpc(DpcFlag flag) {
this->GetStackParameters().dpc_flags.fetch_and(~flag); this->GetStackParameters().dpc_flags.FetchAnd(~flag);
} }
ALWAYS_INLINE u8 GetDpc() const { ALWAYS_INLINE u8 GetDpc() const {
return this->GetStackParameters().dpc_flags.load(); return this->GetStackParameters().dpc_flags.Load();
} }
ALWAYS_INLINE bool HasDpc() const { ALWAYS_INLINE bool HasDpc() const {
@ -516,7 +516,7 @@ namespace ams::kern {
m_closed_object = object; m_closed_object = object;
/* Schedule destruction DPC. */ /* Schedule destruction DPC. */
if ((this->GetStackParameters().dpc_flags.load(std::memory_order_relaxed) & DpcFlag_PerformDestruction) == 0) { if ((this->GetStackParameters().dpc_flags.Load<std::memory_order_relaxed>() & DpcFlag_PerformDestruction) == 0) {
this->RegisterDpc(DpcFlag_PerformDestruction); this->RegisterDpc(DpcFlag_PerformDestruction);
} }
} }
@ -544,12 +544,12 @@ namespace ams::kern {
constexpr bool IsAttachedToDebugger() const { return m_debug_attached; } constexpr bool IsAttachedToDebugger() const { return m_debug_attached; }
void AddCpuTime(s32 core_id, s64 amount) { void AddCpuTime(s32 core_id, s64 amount) {
m_cpu_time += amount; m_cpu_time.FetchAdd(amount);
/* TODO: Debug kernels track per-core tick counts. Should we? */ /* TODO: Debug kernels track per-core tick counts. Should we? */
MESOSPHERE_UNUSED(core_id); MESOSPHERE_UNUSED(core_id);
} }
s64 GetCpuTime() const { return m_cpu_time.load(); } s64 GetCpuTime() const { return m_cpu_time.Load(); }
s64 GetCpuTime(s32 core_id) const { s64 GetCpuTime(s32 core_id) const {
MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores)); MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
@ -591,7 +591,7 @@ namespace ams::kern {
ALWAYS_INLINE void *GetKernelStackTop() const { return m_kernel_stack_top; } ALWAYS_INLINE void *GetKernelStackTop() const { return m_kernel_stack_top; }
ALWAYS_INLINE bool IsTerminationRequested() const { ALWAYS_INLINE bool IsTerminationRequested() const {
return m_termination_requested.load() || this->GetRawState() == ThreadState_Terminated; return m_termination_requested.Load() || this->GetRawState() == ThreadState_Terminated;
} }
size_t GetKernelStackUsage() const; size_t GetKernelStackUsage() const;

View file

@ -96,7 +96,7 @@ namespace ams::kern::arch::arm64::cpu {
KLightLock m_lock; KLightLock m_lock;
KLightLock m_cv_lock; KLightLock m_cv_lock;
KLightConditionVariable m_cv; KLightConditionVariable m_cv;
std::atomic<u64> m_target_cores; util::Atomic<u64> m_target_cores;
volatile Operation m_operation; volatile Operation m_operation;
private: private:
static void ThreadFunction(uintptr_t _this) { static void ThreadFunction(uintptr_t _this) {
@ -109,7 +109,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Wait for a request to come in. */ /* Wait for a request to come in. */
{ {
KScopedLightLock lk(m_cv_lock); KScopedLightLock lk(m_cv_lock);
while ((m_target_cores.load() & (1ul << core_id)) == 0) { while ((m_target_cores.Load() & (1ul << core_id)) == 0) {
m_cv.Wait(std::addressof(m_cv_lock)); m_cv.Wait(std::addressof(m_cv_lock));
} }
} }
@ -120,7 +120,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Broadcast, if there's nothing pending. */ /* Broadcast, if there's nothing pending. */
{ {
KScopedLightLock lk(m_cv_lock); KScopedLightLock lk(m_cv_lock);
if (m_target_cores.load() == 0) { if (m_target_cores.Load() == 0) {
m_cv.Broadcast(); m_cv.Broadcast();
} }
} }
@ -129,7 +129,7 @@ namespace ams::kern::arch::arm64::cpu {
void ProcessOperation(); void ProcessOperation();
public: public:
constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), m_lock(), m_cv_lock(), m_cv(), m_target_cores(), m_operation(Operation::Idle) { /* ... */ } constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), m_lock(), m_cv_lock(), m_cv(), m_target_cores(0), m_operation(Operation::Idle) { /* ... */ }
void Initialize(s32 core_id) { void Initialize(s32 core_id) {
/* Reserve a thread from the system limit. */ /* Reserve a thread from the system limit. */
@ -163,7 +163,7 @@ namespace ams::kern::arch::arm64::cpu {
if ((op == Operation::InstructionMemoryBarrier) || (Kernel::GetState() == Kernel::State::Initializing)) { if ((op == Operation::InstructionMemoryBarrier) || (Kernel::GetState() == Kernel::State::Initializing)) {
/* Check that there's no on-going operation. */ /* Check that there's no on-going operation. */
MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle); MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
MESOSPHERE_ABORT_UNLESS(m_target_cores.load() == 0); MESOSPHERE_ABORT_UNLESS(m_target_cores.Load() == 0);
/* Set operation. */ /* Set operation. */
m_operation = op; m_operation = op;
@ -171,13 +171,13 @@ namespace ams::kern::arch::arm64::cpu {
/* For certain operations, we want to send an interrupt. */ /* For certain operations, we want to send an interrupt. */
m_target_cores = other_cores_mask; m_target_cores = other_cores_mask;
const u64 target_mask = m_target_cores.load(); const u64 target_mask = m_target_cores.Load();
DataSynchronizationBarrier(); DataSynchronizationBarrier();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask); Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);
this->ProcessOperation(); this->ProcessOperation();
while (m_target_cores.load() != 0) { while (m_target_cores.Load() != 0) {
cpu::Yield(); cpu::Yield();
} }
@ -189,7 +189,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Check that there's no on-going operation. */ /* Check that there's no on-going operation. */
MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle); MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
MESOSPHERE_ABORT_UNLESS(m_target_cores.load() == 0); MESOSPHERE_ABORT_UNLESS(m_target_cores.Load() == 0);
/* Set operation. */ /* Set operation. */
m_operation = op; m_operation = op;
@ -199,7 +199,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Use the condvar. */ /* Use the condvar. */
m_cv.Broadcast(); m_cv.Broadcast();
while (m_target_cores.load() != 0) { while (m_target_cores.Load() != 0) {
m_cv.Wait(std::addressof(m_cv_lock)); m_cv.Wait(std::addressof(m_cv_lock));
} }
@ -287,7 +287,7 @@ namespace ams::kern::arch::arm64::cpu {
break; break;
} }
m_target_cores &= ~(1ul << GetCurrentCoreId()); m_target_cores.FetchAnd(~(1ul << GetCurrentCoreId()));
} }
ALWAYS_INLINE void SetEventLocally() { ALWAYS_INLINE void SetEventLocally() {
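
KCacheHelperInterruptHandler coordinates the cores through m_target_cores as a completion bitmask: the requesting core publishes the set of cores that must run the cache operation, each participant clears its own bit when finished (now with FetchAnd rather than the old overloaded &=), and the requester waits for the mask to drain to zero. A rough sketch of that handshake follows; the names and the memory orders chosen here are ours, not the kernel's.

#include <atomic>
#include <cstdint>

/* Requester: publish the participating cores, kick them, then wait for the mask
   to drain. The kernel version either spins with cpu::Yield() or sleeps on m_cv,
   depending on the operation. */
inline void RequestOperation(std::atomic<uint64_t> &target_cores, uint64_t other_cores_mask) {
    target_cores.store(other_cores_mask, std::memory_order_release);
    /* ... send the IPI and perform the local part of the operation ... */
    while (target_cores.load(std::memory_order_acquire) != 0) {
        /* wait */
    }
}

/* Participant: perform the operation, then clear only our own bit. */
inline void AcknowledgeOperation(std::atomic<uint64_t> &target_cores, int core_id) {
    target_cores.fetch_and(~(UINT64_C(1) << core_id), std::memory_order_release);
}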

View file

@ -27,35 +27,4 @@ namespace ams::kern {
return obj; return obj;
} }
NOINLINE bool KAutoObject::Open() {
MESOSPHERE_ASSERT_THIS();
/* Atomically increment the reference count, only if it's positive. */
u32 cur_ref_count = m_ref_count.load(std::memory_order_relaxed);
do {
if (AMS_UNLIKELY(cur_ref_count == 0)) {
MESOSPHERE_AUDIT(cur_ref_count != 0);
return false;
}
MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed));
return true;
}
NOINLINE void KAutoObject::Close() {
MESOSPHERE_ASSERT_THIS();
/* Atomically decrement the reference count, not allowing it to become negative. */
u32 cur_ref_count = m_ref_count.load(std::memory_order_relaxed);
do {
MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed));
/* If ref count hits zero, schedule the object for destruction. */
if (cur_ref_count - 1 == 0) {
this->ScheduleDestruction();
}
}
} }

View file

@ -28,8 +28,7 @@ namespace ams::kern {
void KClientPort::OnSessionFinalized() { void KClientPort::OnSessionFinalized() {
KScopedSchedulerLock sl; KScopedSchedulerLock sl;
const auto prev = m_num_sessions--; if (m_num_sessions.FetchSub(1) == m_max_sessions) {
if (prev == m_max_sessions) {
this->NotifyAvailable(); this->NotifyAvailable();
} }
} }
@ -56,7 +55,7 @@ namespace ams::kern {
bool KClientPort::IsSignaled() const { bool KClientPort::IsSignaled() const {
MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT_THIS();
return m_num_sessions < m_max_sessions; return m_num_sessions.Load() < m_max_sessions;
} }
Result KClientPort::CreateSession(KClientSession **out) { Result KClientPort::CreateSession(KClientSession **out) {
@ -99,7 +98,6 @@ namespace ams::kern {
/* Check that we successfully created a session. */ /* Check that we successfully created a session. */
R_UNLESS(session != nullptr, svc::ResultOutOfResource()); R_UNLESS(session != nullptr, svc::ResultOutOfResource());
/* Update the session counts. */ /* Update the session counts. */
auto count_guard = SCOPE_GUARD { session->Close(); }; auto count_guard = SCOPE_GUARD { session->Close(); };
{ {
@ -107,22 +105,22 @@ namespace ams::kern {
s32 new_sessions; s32 new_sessions;
{ {
const auto max = m_max_sessions; const auto max = m_max_sessions;
auto cur_sessions = m_num_sessions.load(std::memory_order_acquire); auto cur_sessions = m_num_sessions.Load();
do { do {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions()); R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1; new_sessions = cur_sessions + 1;
} while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); } while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
} }
/* Atomically update the peak session tracking. */ /* Atomically update the peak session tracking. */
{ {
auto peak = m_peak_sessions.load(std::memory_order_acquire); auto peak = m_peak_sessions.Load();
do { do {
if (peak >= new_sessions) { if (peak >= new_sessions) {
break; break;
} }
} while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); } while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
} }
} }
count_guard.Cancel(); count_guard.Cancel();
@ -183,22 +181,22 @@ namespace ams::kern {
s32 new_sessions; s32 new_sessions;
{ {
const auto max = m_max_sessions; const auto max = m_max_sessions;
auto cur_sessions = m_num_sessions.load(std::memory_order_acquire); auto cur_sessions = m_num_sessions.Load();
do { do {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions()); R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1; new_sessions = cur_sessions + 1;
} while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); } while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
} }
/* Atomically update the peak session tracking. */ /* Atomically update the peak session tracking. */
{ {
auto peak = m_peak_sessions.load(std::memory_order_acquire); auto peak = m_peak_sessions.Load();
do { do {
if (peak >= new_sessions) { if (peak >= new_sessions) {
break; break;
} }
} while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); } while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
} }
} }
count_guard.Cancel(); count_guard.Cancel();
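
Both session-creation paths above use the same pair of loops: an increment of m_num_sessions that only succeeds while the count is below m_max_sessions, followed by the store-max update of m_peak_sessions. The seeding loads no longer carry acquire ordering because the compare-exchange re-validates whatever value was read, so the seed's ordering does not affect the correctness of the count. Below is a compilable sketch of the bounded increment; the name and the std::optional return are ours.

#include <atomic>
#include <optional>

inline std::optional<int> TryIncrementBelow(std::atomic<int> &count, int max) {
    int cur = count.load(std::memory_order_relaxed);
    do {
        if (cur >= max) {
            return std::nullopt; /* corresponds to svc::ResultOutOfSessions() above */
        }
    } while (!count.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
    return cur + 1; /* the value we installed */
}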

View file

@ -25,31 +25,6 @@ namespace ams::kern {
} }
void KDebugBase::ProcessHolder::Attach(KProcess *process) {
MESOSPHERE_ASSERT(m_process == nullptr);
/* Set our process. */
m_process = process;
/* Open reference to our process. */
m_process->Open();
/* Set our reference count. */
m_ref_count = 1;
}
void KDebugBase::ProcessHolder::Detach() {
/* Close our process, if we have one. */
KProcess * const process = m_process;
if (AMS_LIKELY(process != nullptr)) {
/* Set our process to a debug sentinel value, which will cause crash if accessed. */
m_process = reinterpret_cast<KProcess *>(1);
/* Close reference to our process. */
process->Close();
}
}
void KDebugBase::Initialize() { void KDebugBase::Initialize() {
/* Clear the continue flags. */ /* Clear the continue flags. */
m_continue_flags = 0; m_continue_flags = 0;

View file

@ -1,47 +0,0 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
NOINLINE bool KDebugBase::ProcessHolder::Open() {
/* Atomically increment the reference count, only if it's positive. */
u32 cur_ref_count = m_ref_count.load(std::memory_order_relaxed);
do {
if (AMS_UNLIKELY(cur_ref_count == 0)) {
MESOSPHERE_AUDIT(cur_ref_count != 0);
return false;
}
MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed));
return true;
}
NOINLINE void KDebugBase::ProcessHolder::Close() {
/* Atomically decrement the reference count, not allowing it to become negative. */
u32 cur_ref_count = m_ref_count.load(std::memory_order_relaxed);
do {
MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed));
/* If ref count hits zero, schedule the object for destruction. */
if (cur_ref_count - 1 == 0) {
this->Detach();
}
}
}

View file

@ -40,7 +40,7 @@ namespace ams::kern {
KScopedSchedulerLock sl; KScopedSchedulerLock sl;
/* Ensure we actually have locking to do. */ /* Ensure we actually have locking to do. */
if (m_tag.load(std::memory_order_relaxed) != _owner) { if (m_tag.Load<std::memory_order_relaxed>() != _owner) {
return false; return false;
} }
@ -68,16 +68,13 @@ namespace ams::kern {
KScopedSchedulerLock sl; KScopedSchedulerLock sl;
/* Get the next owner. */ /* Get the next owner. */
s32 num_waiters = 0; s32 num_waiters;
KThread *next_owner = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag))); KThread *next_owner = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
/* Pass the lock to the next owner. */ /* Pass the lock to the next owner. */
uintptr_t next_tag = 0; uintptr_t next_tag = 0;
if (next_owner != nullptr) { if (next_owner != nullptr) {
next_tag = reinterpret_cast<uintptr_t>(next_owner); next_tag = reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
if (num_waiters > 1) {
next_tag |= 0x1;
}
next_owner->EndWait(ResultSuccess()); next_owner->EndWait(ResultSuccess());
@ -92,7 +89,7 @@ namespace ams::kern {
} }
/* Write the new tag value. */ /* Write the new tag value. */
m_tag.store(next_tag); m_tag.Store<std::memory_order_release>(next_tag);
} }
} }

View file

@ -25,8 +25,8 @@ namespace ams::kern {
constexpr u64 ProcessIdMin = InitialProcessIdMax + 1; constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max(); constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
std::atomic<u64> g_initial_process_id = InitialProcessIdMin; constinit util::Atomic<u64> g_initial_process_id{InitialProcessIdMin};
std::atomic<u64> g_process_id = ProcessIdMin; constinit util::Atomic<u64> g_process_id{ProcessIdMin};
Result TerminateChildren(KProcess *process, const KThread *thread_to_not_terminate) { Result TerminateChildren(KProcess *process, const KThread *thread_to_not_terminate) {
/* Request that all children threads terminate. */ /* Request that all children threads terminate. */
@ -299,7 +299,7 @@ namespace ams::kern {
R_TRY(m_capabilities.Initialize(caps, num_caps, std::addressof(m_page_table))); R_TRY(m_capabilities.Initialize(caps, num_caps, std::addressof(m_page_table)));
/* Initialize the process id. */ /* Initialize the process id. */
m_process_id = g_initial_process_id++; m_process_id = g_initial_process_id.FetchAdd(1);
MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= m_process_id); MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= m_process_id);
MESOSPHERE_ABORT_UNLESS(m_process_id <= InitialProcessIdMax); MESOSPHERE_ABORT_UNLESS(m_process_id <= InitialProcessIdMax);
@ -409,7 +409,7 @@ namespace ams::kern {
R_TRY(m_capabilities.Initialize(user_caps, num_caps, std::addressof(m_page_table))); R_TRY(m_capabilities.Initialize(user_caps, num_caps, std::addressof(m_page_table)));
/* Initialize the process id. */ /* Initialize the process id. */
m_process_id = g_process_id++; m_process_id = g_process_id.FetchAdd(1);
MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= m_process_id); MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= m_process_id);
MESOSPHERE_ABORT_UNLESS(m_process_id <= ProcessIdMax); MESOSPHERE_ABORT_UNLESS(m_process_id <= ProcessIdMax);
@ -789,15 +789,15 @@ namespace ams::kern {
} }
void KProcess::IncrementRunningThreadCount() { void KProcess::IncrementRunningThreadCount() {
MESOSPHERE_ASSERT(m_num_running_threads.load() >= 0); MESOSPHERE_ASSERT(m_num_running_threads.Load() >= 0);
m_num_running_threads.fetch_add(1); m_num_running_threads.FetchAdd(1);
} }
void KProcess::DecrementRunningThreadCount() { void KProcess::DecrementRunningThreadCount() {
MESOSPHERE_ASSERT(m_num_running_threads.load() > 0); MESOSPHERE_ASSERT(m_num_running_threads.Load() > 0);
if (m_num_running_threads.fetch_sub(1) == 1) { if (m_num_running_threads.FetchSub(1) == 1) {
this->Terminate(); this->Terminate();
} }
} }

View file

@ -246,9 +246,9 @@ namespace ams::kern {
if (cur_process != nullptr) { if (cur_process != nullptr) {
/* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */ /* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) { if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) {
m_state.prev_thread = cur_thread; m_state.prev_thread.Store<std::memory_order_relaxed>(cur_thread);
} else { } else {
m_state.prev_thread = nullptr; m_state.prev_thread.Store<std::memory_order_relaxed>(nullptr);
} }
} }
@ -270,13 +270,9 @@ namespace ams::kern {
void KScheduler::ClearPreviousThread(KThread *thread) { void KScheduler::ClearPreviousThread(KThread *thread) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread()); MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
for (size_t i = 0; i < cpu::NumCores; ++i) { for (size_t i = 0; i < cpu::NumCores; ++i) {
/* Get an atomic reference to the core scheduler's previous thread. */
std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread);
static_assert(std::atomic_ref<KThread *>::is_always_lock_free);
/* Atomically clear the previous thread if it's our target. */ /* Atomically clear the previous thread if it's our target. */
KThread *compare = thread; KThread *compare = thread;
prev_thread.compare_exchange_strong(compare, nullptr); Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread.CompareExchangeStrong(compare, nullptr);
} }
} }

View file

@ -219,7 +219,7 @@ namespace ams::kern {
this->SetInExceptionHandler(); this->SetInExceptionHandler();
/* Set thread ID. */ /* Set thread ID. */
m_thread_id = s_next_thread_id++; m_thread_id = s_next_thread_id.FetchAdd(1);
/* We initialized! */ /* We initialized! */
m_initialized = true; m_initialized = true;
@ -707,7 +707,7 @@ namespace ams::kern {
KScopedSchedulerLock sl; KScopedSchedulerLock sl;
/* Determine the priority value to use. */ /* Determine the priority value to use. */
const s32 target_priority = m_termination_requested.load() && priority >= TerminatingThreadPriority ? TerminatingThreadPriority : priority; const s32 target_priority = m_termination_requested.Load() && priority >= TerminatingThreadPriority ? TerminatingThreadPriority : priority;
/* Change our base priority. */ /* Change our base priority. */
if (this->GetStackParameters().is_pinned) { if (this->GetStackParameters().is_pinned) {
@ -1183,7 +1183,7 @@ namespace ams::kern {
const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool { const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool {
/* Perform an atomic compare-and-swap from false to true. */ /* Perform an atomic compare-and-swap from false to true. */
u8 expected = false; u8 expected = false;
return m_termination_requested.compare_exchange_strong(expected, true); return m_termination_requested.CompareExchangeStrong(expected, true);
}(); }();
/* If this is the first request, start termination procedure. */ /* If this is the first request, start termination procedure. */

View file

@ -42,15 +42,15 @@ namespace ams::kern {
return arr; return arr;
}(); }();
std::atomic<s32> g_next_ticket = 0; constinit util::Atomic<s32> g_next_ticket{0};
std::atomic<s32> g_current_ticket = 0; constinit util::Atomic<s32> g_current_ticket{0};
std::array<s32, cpu::NumCores> g_core_tickets = NegativeArray; constinit std::array<s32, cpu::NumCores> g_core_tickets = NegativeArray;
s32 GetCoreTicket() { s32 GetCoreTicket() {
const s32 core_id = GetCurrentCoreId(); const s32 core_id = GetCurrentCoreId();
if (g_core_tickets[core_id] == -1) { if (g_core_tickets[core_id] == -1) {
g_core_tickets[core_id] = 2 * g_next_ticket.fetch_add(1); g_core_tickets[core_id] = 2 * g_next_ticket.FetchAdd(1);
} }
return g_core_tickets[core_id]; return g_core_tickets[core_id];
} }
@ -58,24 +58,21 @@ namespace ams::kern {
void WaitCoreTicket() { void WaitCoreTicket() {
const s32 expected = GetCoreTicket(); const s32 expected = GetCoreTicket();
const s32 desired = expected + 1; const s32 desired = expected + 1;
s32 compare = g_current_ticket; s32 compare = g_current_ticket.Load<std::memory_order_relaxed>();
do { do {
if (compare == desired) { if (compare == desired) {
break; break;
} }
compare = expected; compare = expected;
} while (!g_current_ticket.compare_exchange_weak(compare, desired)); } while (!g_current_ticket.CompareExchangeWeak(compare, desired));
} }
void ReleaseCoreTicket() { void ReleaseCoreTicket() {
const s32 expected = GetCoreTicket() + 1; const s32 expected = GetCoreTicket() + 1;
const s32 desired = expected + 1; const s32 desired = expected + 1;
s32 compare = g_current_ticket;
do { s32 compare = expected;
if (compare != expected) { g_current_ticket.CompareExchangeStrong(compare, desired);
break;
}
} while (!g_current_ticket.compare_exchange_weak(compare, desired));
} }
ALWAYS_INLINE KExceptionContext *GetPanicExceptionContext(int core_id) { ALWAYS_INLINE KExceptionContext *GetPanicExceptionContext(int core_id) {
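
The panic path above serializes cores with a small ticket lock: GetCoreTicket() hands each core a ticket drawn from g_next_ticket, WaitCoreTicket() waits until g_current_ticket reaches that ticket and bumps it by one to mark the slot as taken, and ReleaseCoreTicket() advances it to the next ticket, which the rewrite reduces from a loop to a single CompareExchangeStrong. For comparison, here is a plain textbook ticket lock over std::atomic; the kernel's variant additionally caches each core's ticket and spaces tickets by two to make room for the taken marker.

#include <atomic>
#include <cstdint>

struct TicketLock {
    std::atomic<uint32_t> next{0};
    std::atomic<uint32_t> serving{0};

    void lock() {
        const uint32_t ticket = next.fetch_add(1, std::memory_order_relaxed);
        while (serving.load(std::memory_order_acquire) != ticket) {
            /* spin; a panic path would typically yield here */
        }
    }

    void unlock() {
        serving.fetch_add(1, std::memory_order_release);
    }
};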

View file

@ -53,6 +53,8 @@
#include <vapours/util/util_fixed_map.hpp> #include <vapours/util/util_fixed_map.hpp>
#include <vapours/util/util_fixed_set.hpp> #include <vapours/util/util_fixed_set.hpp>
#include <vapours/util/util_atomic.hpp>
#ifdef ATMOSPHERE_IS_STRATOSPHERE #ifdef ATMOSPHERE_IS_STRATOSPHERE
#include <vapours/util/util_mutex_utils.hpp> #include <vapours/util/util_mutex_utils.hpp>
#endif #endif
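
What follows is the new header itself, <vapours/util/util_atomic.hpp>, pulled in by the include added above. Its interface mirrors std::atomic, but the memory order is a template argument defaulting to std::memory_order_seq_cst and the operations are named methods (Load, Store, Exchange, CompareExchangeWeak, CompareExchangeStrong, and the Fetch* read-modify-writes for integral types), all built directly on AArch64 acquire/release and exclusive load/store instructions. Note the weak/strong distinction in the implementations: CompareExchangeWeak makes a single exclusive attempt and may fail spuriously, while CompareExchangeStrong retries the exclusive pair until the store succeeds or a genuine mismatch is observed, which is why the weak form appears inside retry loops and the strong form in one-shot updates such as KThread's termination flag. A short sketch of the converted call-site style, assuming the vapours headers are on the include path; this is not additional kernel code.

#include <vapours.hpp>
#include <atomic>
#include <cstdint>

void Example() {
    ams::util::Atomic<uint32_t> ref_count{1};
    ams::util::Atomic<int64_t> cpu_time{0};

    uint32_t seed = ref_count.Load<std::memory_order_relaxed>();
    while (!ref_count.CompareExchangeWeak<std::memory_order_relaxed>(seed, seed + 1)) {
        /* on a value mismatch, seed is updated with the observed value, as with std::atomic */
    }

    cpu_time.FetchAdd(100);                       /* read-modify-write, seq_cst by default */
    cpu_time.Store<std::memory_order_release>(0);
}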

View file

@ -0,0 +1,323 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours/common.hpp>
#include <vapours/assert.hpp>
namespace ams::util {
namespace impl {
template<typename T>
struct AtomicIntegerStorage;
template<typename T> requires (sizeof(T) == sizeof(u8))
struct AtomicIntegerStorage<T> {
using Type = u8;
};
template<typename T> requires (sizeof(T) == sizeof(u16))
struct AtomicIntegerStorage<T> {
using Type = u16;
};
template<typename T> requires (sizeof(T) == sizeof(u32))
struct AtomicIntegerStorage<T> {
using Type = u32;
};
template<typename T> requires (sizeof(T) == sizeof(u64))
struct AtomicIntegerStorage<T> {
using Type = u64;
};
template<typename T>
concept UsableAtomicType = (sizeof(T) <= sizeof(u64)) && !std::is_const<T>::value && !std::is_volatile<T>::value && (std::is_pointer<T>::value || requires (const T &t) {
std::bit_cast<typename AtomicIntegerStorage<T>::Type, T>(t);
});
template<UsableAtomicType T>
using AtomicStorage = typename AtomicIntegerStorage<T>::Type;
static_assert(std::same_as<AtomicStorage<void *>, uintptr_t>);
static_assert(std::same_as<AtomicStorage<s8>, u8>);
static_assert(std::same_as<AtomicStorage<u8>, u8>);
static_assert(std::same_as<AtomicStorage<s16>, u16>);
static_assert(std::same_as<AtomicStorage<u16>, u16>);
static_assert(std::same_as<AtomicStorage<s32>, u32>);
static_assert(std::same_as<AtomicStorage<u32>, u32>);
static_assert(std::same_as<AtomicStorage<s64>, u64>);
static_assert(std::same_as<AtomicStorage<u64>, u64>);
ALWAYS_INLINE void ClearExclusiveForAtomic() {
__asm__ __volatile__("clrex" ::: "memory");
}
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(_FNAME_, _MNEMONIC_) \
template<std::unsigned_integral T> T _FNAME_ ##ForAtomic(const volatile T *); \
\
template<> ALWAYS_INLINE u8 _FNAME_ ##ForAtomic(const volatile u8 *p) { u8 v; __asm__ __volatile__(_MNEMONIC_ "b %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
template<> ALWAYS_INLINE u16 _FNAME_ ##ForAtomic(const volatile u16 *p) { u16 v; __asm__ __volatile__(_MNEMONIC_ "h %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
template<> ALWAYS_INLINE u32 _FNAME_ ##ForAtomic(const volatile u32 *p) { u32 v; __asm__ __volatile__(_MNEMONIC_ " %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
template<> ALWAYS_INLINE u64 _FNAME_ ##ForAtomic(const volatile u64 *p) { u64 v; __asm__ __volatile__(_MNEMONIC_ " %[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; }
AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadAcquire, "ldar")
AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadExclusive, "ldxr")
AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadAcquireExclusive, "ldaxr")
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION
template<std::unsigned_integral T> void StoreReleaseForAtomic(volatile T *, T);
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u8 *p, u8 v) { __asm__ __volatile__("stlrb %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u16 *p, u16 v) { __asm__ __volatile__("stlrh %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u32 *p, u32 v) { __asm__ __volatile__("stlr %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u64 *p, u64 v) { __asm__ __volatile__("stlr %[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(_FNAME_, _MNEMONIC_) \
template<std::unsigned_integral T> bool _FNAME_ ##ForAtomic(volatile T *, T); \
\
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u8 *p, u8 v) { int result; __asm__ __volatile__(_MNEMONIC_ "b %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u16 *p, u16 v) { int result; __asm__ __volatile__(_MNEMONIC_ "h %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u32 *p, u32 v) { int result; __asm__ __volatile__(_MNEMONIC_ " %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u64 *p, u64 v) { int result; __asm__ __volatile__(_MNEMONIC_ " %w[result], %[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; }
AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(StoreExclusive, "stxr")
AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(StoreReleaseExclusive, "stlxr")
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION
}
template<impl::UsableAtomicType T>
class Atomic {
NON_COPYABLE(Atomic);
NON_MOVEABLE(Atomic);
private:
using StorageType = impl::AtomicStorage<T>;
static constexpr bool IsIntegral = std::integral<T>;
static constexpr ALWAYS_INLINE T ConvertToType(StorageType s) {
if constexpr (std::integral<T>) {
return static_cast<T>(s);
} else if constexpr(std::is_pointer<T>::value) {
return reinterpret_cast<T>(s);
} else {
return std::bit_cast<T>(s);
}
}
static constexpr ALWAYS_INLINE StorageType ConvertToStorage(T arg) {
if constexpr (std::integral<T>) {
return static_cast<StorageType>(arg);
} else if constexpr(std::is_pointer<T>::value) {
if (std::is_constant_evaluated() && arg == nullptr) {
return 0;
}
return reinterpret_cast<StorageType>(arg);
} else {
return std::bit_cast<StorageType>(arg);
}
}
private:
StorageType m_v;
private:
ALWAYS_INLINE volatile StorageType *GetStoragePointer() { return reinterpret_cast< volatile StorageType *>(std::addressof(m_v)); }
ALWAYS_INLINE const volatile StorageType *GetStoragePointer() const { return reinterpret_cast<const volatile StorageType *>(std::addressof(m_v)); }
public:
ALWAYS_INLINE explicit Atomic() { /* ... */ }
constexpr ALWAYS_INLINE explicit Atomic(T v) : m_v(ConvertToStorage(v)) { /* ... */ }
constexpr ALWAYS_INLINE T operator=(T desired) {
if (std::is_constant_evaluated()) {
m_v = ConvertToStorage(desired);
} else {
this->Store(desired);
}
return desired;
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Load() const {
if constexpr (Order != std::memory_order_relaxed) {
return ConvertToType(impl::LoadAcquireForAtomic(this->GetStoragePointer()));
} else {
return ConvertToType(*this->GetStoragePointer());
}
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE void Store(T arg) {
if constexpr (Order != std::memory_order_relaxed) {
impl::StoreReleaseForAtomic(this->GetStoragePointer(), ConvertToStorage(arg));
} else {
*this->GetStoragePointer() = ConvertToStorage(arg);
}
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Exchange(T arg) {
volatile StorageType * const p = this->GetStoragePointer();
const StorageType s = ConvertToStorage(arg);
StorageType current;
if constexpr (Order == std::memory_order_relaxed) {
do {
current = impl::LoadExclusiveForAtomic(p);
} while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomic(p, s)));
} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
do {
current = impl::LoadAcquireExclusiveForAtomic(p);
} while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomic(p, s)));
} else if constexpr (Order == std::memory_order_release) {
do {
current = impl::LoadExclusiveForAtomic(p);
} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, s)));
} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
do {
current = impl::LoadAcquireExclusiveForAtomic(p);
} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, s)));
} else {
static_assert(Order != Order, "Invalid memory order");
}
return current;
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) {
volatile StorageType * const p = this->GetStoragePointer();
const StorageType e = ConvertToStorage(expected);
const StorageType d = ConvertToStorage(desired);
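/* Make a single load-exclusive/store-exclusive attempt; a spurious store-exclusive failure is reported as a failed exchange, which the weak form permits. */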
if constexpr (Order == std::memory_order_relaxed) {
const StorageType current = impl::LoadExclusiveForAtomic(p);
if (AMS_UNLIKELY(current != e)) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
return AMS_LIKELY(impl::StoreExclusiveForAtomic(p, d));
} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
const StorageType current = impl::LoadAcquireExclusiveForAtomic(p);
if (AMS_UNLIKELY(current != e)) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
return AMS_LIKELY(impl::StoreExclusiveForAtomic(p, d));
} else if constexpr (Order == std::memory_order_release) {
const StorageType current = impl::LoadExclusiveForAtomic(p);
if (AMS_UNLIKELY(current != e)) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
return AMS_LIKELY(impl::StoreReleaseExclusiveForAtomic(p, d));
} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
const StorageType current = impl::LoadAcquireExclusiveForAtomic(p);
if (AMS_UNLIKELY(current != e)) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
return AMS_LIKELY(impl::StoreReleaseExclusiveForAtomic(p, d));
} else {
static_assert(Order != Order, "Invalid memory order");
}
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) {
volatile StorageType * const p = this->GetStoragePointer();
const StorageType e = ConvertToStorage(expected);
const StorageType d = ConvertToStorage(desired);
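/* Retry the exclusive sequence until the store-exclusive succeeds, failing only when the observed value genuinely differs from expected. */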
if constexpr (Order == std::memory_order_relaxed) {
StorageType current;
do {
if (current = impl::LoadExclusiveForAtomic(p); current != e) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
} while (!impl::StoreExclusiveForAtomic(p, d));
} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
StorageType current;
do {
if (current = impl::LoadAcquireExclusiveForAtomic(p); current != e) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
} while (!impl::StoreExclusiveForAtomic(p, d));
} else if constexpr (Order == std::memory_order_release) {
StorageType current;
do {
if (current = impl::LoadExclusiveForAtomic(p); current != e) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
} while (!impl::StoreReleaseExclusiveForAtomic(p, d));
} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
StorageType current;
do {
if (current = impl::LoadAcquireExclusiveForAtomic(p); current != e) {
impl::ClearExclusiveForAtomic();
expected = ConvertToType(current);
return false;
}
} while (!impl::StoreReleaseExclusiveForAtomic(p, d));
} else {
static_assert(Order != Order, "Invalid memory order");
}
return true;
}
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATOR_) \
template<bool Enable = IsIntegral, typename = typename std::enable_if<Enable, void>::type> \
ALWAYS_INLINE T Fetch ## _OPERATION_(T arg) { \
static_assert(Enable); \
volatile StorageType * const p = this->GetStoragePointer(); \
const StorageType s = ConvertToStorage(arg); \
\
StorageType current; \
do { \
current = impl::LoadAcquireExclusiveForAtomic<StorageType>(p); \
} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic<StorageType>(p, current _OPERATOR_ s))); \
return static_cast<T>(current); \
}
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, +)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, -)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, &)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, |)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, ^)
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
};
}
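
For illustration only, a minimal usage sketch of the wrapper's interface; the counter, the functions, and the <vapours.hpp> umbrella include are assumptions made for this example and are not part of the commit:

#include <vapours.hpp>

namespace example {

    /* Hypothetical event counter, used only to exercise the interface. */
    constinit ams::util::Atomic<u64> g_event_count(0);

    void RecordEvent() {
        /* FetchAdd returns the value observed before the addition; on arm64 it lowers to a load-acquire-exclusive/store-release-exclusive loop. */
        g_event_count.FetchAdd(1);
    }

    u64 SampleEventCount() {
        /* A relaxed load reads the storage directly, with no barrier. */
        return g_event_count.Load<std::memory_order_relaxed>();
    }

}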

View file

@@ -0,0 +1,111 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours/common.hpp>
#include <vapours/assert.hpp>
namespace ams::util {
namespace impl {
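/* Select the unsigned integer with the same size as T to serve as the raw atomic storage; used by the UsableAtomicType concept below. */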
template<typename T>
struct AtomicIntegerStorage;
template<typename T> requires (sizeof(T) == sizeof(u8))
struct AtomicIntegerStorage<T> {
using Type = u8;
};
template<typename T> requires (sizeof(T) == sizeof(u16))
struct AtomicIntegerStorage<T> {
using Type = u16;
};
template<typename T> requires (sizeof(T) == sizeof(u32))
struct AtomicIntegerStorage<T> {
using Type = u32;
};
template<typename T> requires (sizeof(T) == sizeof(u64))
struct AtomicIntegerStorage<T> {
using Type = u64;
};
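/* A type is usable if it fits in 64 bits, is not cv-qualified, and is either a pointer or bit-castable to its integer storage type. */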
template<typename T>
concept UsableAtomicType = (sizeof(T) <= sizeof(u64)) && !std::is_const<T>::value && !std::is_volatile<T>::value && (std::is_pointer<T>::value || requires (const T &t) {
std::bit_cast<typename AtomicIntegerStorage<T>::Type, T>(t);
});
}
template<impl::UsableAtomicType T>
class Atomic {
NON_COPYABLE(Atomic);
NON_MOVEABLE(Atomic);
private:
static_assert(std::atomic<T>::is_always_lock_free);
private:
std::atomic<T> m_v;
public:
ALWAYS_INLINE explicit Atomic() { /* ... */ }
constexpr ALWAYS_INLINE explicit Atomic(T v) : m_v(v) { /* ... */ }
ALWAYS_INLINE T operator=(T desired) {
return (m_v = desired);
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Load() const {
return m_v.load(Order);
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE void Store(T arg) {
m_v.store(arg, Order);
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Exchange(T arg) {
return m_v.exchange(arg, Order);
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) {
return m_v.compare_exchange_weak(expected, desired, Order);
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) {
return m_v.compare_exchange_strong(expected, desired, Order);
}
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATION_LOWER_) \
ALWAYS_INLINE T Fetch ## _OPERATION_(T arg) { \
return m_v.fetch_##_OPERATION_LOWER_(arg); \
}
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, add)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, sub)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, and)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, or)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, xor)
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
};
}
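
The generic fallback forwards to std::atomic, so its observable behaviour is exactly that of the standard operations; a standalone sketch of what FetchAdd and CompareExchangeWeak forward to (plain std::atomic, no project headers):

#include <atomic>
#include <cstdint>

int main() {
    std::atomic<std::uint32_t> v{5};

    /* fetch_add returns the previous value: prev == 5, v becomes 6. */
    const std::uint32_t prev = v.fetch_add(1);

    /* compare_exchange_weak may fail spuriously; on failure, expected is updated to the observed value. */
    std::uint32_t expected = 6;
    const bool swapped = v.compare_exchange_weak(expected, 7, std::memory_order_seq_cst);

    return (prev == 5 && (swapped || expected == 6)) ? 0 : 1;
}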

View file

@@ -0,0 +1,28 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours/common.hpp>
#include <vapours/assert.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <vapours/util/arch/arm64/util_atomic.hpp>
#else
#include <vapours/util/arch/generic/util_atomic.hpp>
#endif
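
As a further interface illustration, a minimal test-and-set spin lock sketched on top of the wrapper; the class is hypothetical, not code from this commit, and <vapours.hpp> is assumed to reach the dispatching header above:

#include <vapours.hpp>

namespace example {

    /* Hypothetical lock type, shown only to demonstrate the templated memory orders. */
    class SimpleSpinLock {
        NON_COPYABLE(SimpleSpinLock);
        NON_MOVEABLE(SimpleSpinLock);
        private:
            ams::util::Atomic<u32> m_locked;
        public:
            constexpr SimpleSpinLock() : m_locked(0) { /* ... */ }

            void Lock() {
                /* Acquire semantics: spin until the previous value was 0 (unlocked). */
                while (m_locked.Exchange<std::memory_order_acquire>(1) != 0) {
                    /* ... spin ... */
                }
            }

            void Unlock() {
                /* Release semantics: publish the critical section's writes before clearing the lock. */
                m_locked.Store<std::memory_order_release>(0);
            }
    };

}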