/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours/common.hpp>
#include <vapours/assert.hpp>
namespace ams::util {
namespace impl {
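/* AtomicIntegerStorage<T> maps a usable type T to the unsigned integer type of equal size; all of the atomic operations below operate on that raw storage. */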
template<typename T>
struct AtomicIntegerStorage;
template<typename T> requires (sizeof(T) == sizeof(u8))
struct AtomicIntegerStorage<T> {
using Type = u8;
};
template<typename T> requires (sizeof(T) == sizeof(u16))
struct AtomicIntegerStorage<T> {
using Type = u16;
};
template<typename T> requires (sizeof(T) == sizeof(u32))
struct AtomicIntegerStorage<T> {
using Type = u32;
};
template<typename T> requires (sizeof(T) == sizeof(u64))
struct AtomicIntegerStorage<T> {
using Type = u64;
};
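/* A type is usable atomically if it fits in eight bytes, is neither const nor volatile, and is either a pointer or bit-castable to its integer storage type. */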
template<typename T>
concept UsableAtomicType = (sizeof(T) <= sizeof(u64)) && !std::is_const<T>::value && !std::is_volatile<T>::value && (std::is_pointer<T>::value || requires (const T &t) {
std::bit_cast<typename AtomicIntegerStorage<T>::Type, T>(t);
});
template<UsableAtomicType T>
using AtomicStorage = typename AtomicIntegerStorage<T>::Type;
static_assert(std::same_as<AtomicStorage<void *>, u64>);
static_assert(std::same_as<AtomicStorage<s8>, u8>);
static_assert(std::same_as<AtomicStorage<u8>, u8>);
static_assert(std::same_as<AtomicStorage<s16>, u16>);
static_assert(std::same_as<AtomicStorage<u16>, u16>);
static_assert(std::same_as<AtomicStorage<s32>, u32>);
static_assert(std::same_as<AtomicStorage<u32>, u32>);
static_assert(std::same_as<AtomicStorage<s64>, u64>);
static_assert(std::same_as<AtomicStorage<u64>, u64>);
ALWAYS_INLINE void ClearExclusiveForAtomic() {
__asm__ __volatile__("clrex" ::: "memory");
}
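/* Loads: ldar has acquire semantics, ldxr begins an exclusive-monitor sequence, and ldaxr does both. The b/h mnemonic suffixes select byte/halfword accesses, and %w names the 32-bit view of a register. */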
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(_FNAME_, _MNEMONIC_) \
template<std::unsigned_integral T> T _FNAME_ ##ForAtomic(const volatile T *); \
\
template<> ALWAYS_INLINE u8 _FNAME_ ##ForAtomic(const volatile u8 *p) { u8 v; __asm__ __volatile__(_MNEMONIC_ "b %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
template<> ALWAYS_INLINE u16 _FNAME_ ##ForAtomic(const volatile u16 *p) { u16 v; __asm__ __volatile__(_MNEMONIC_ "h %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
template<> ALWAYS_INLINE u32 _FNAME_ ##ForAtomic(const volatile u32 *p) { u32 v; __asm__ __volatile__(_MNEMONIC_ " %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
template<> ALWAYS_INLINE u64 _FNAME_ ##ForAtomic(const volatile u64 *p) { u64 v; __asm__ __volatile__(_MNEMONIC_ " %[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; }
AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadAcquire, "ldar")
AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadExclusive, "ldxr")
AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadAcquireExclusive, "ldaxr")
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION
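/* stlr is a store with release semantics; it carries no exclusivity and cannot fail. */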
template<std::unsigned_integral T> void StoreReleaseForAtomic(volatile T *, T);
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u8 *p, u8 v) { __asm__ __volatile__("stlrb %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u16 *p, u16 v) { __asm__ __volatile__("stlrh %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u32 *p, u32 v) { __asm__ __volatile__("stlr %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u64 *p, u64 v) { __asm__ __volatile__("stlr %[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
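/* Store-exclusives (stxr/stlxr) write a status register: 0 on success, non-zero if the exclusive monitor was lost. These specializations translate that status into a bool so callers can retry in a loop; stlxr additionally has release semantics. */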
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(_FNAME_, _MNEMONIC_) \
template<std::unsigned_integral T> bool _FNAME_ ##ForAtomic(volatile T *, T); \
\
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u8 *p, u8 v) { int result; __asm__ __volatile__(_MNEMONIC_ "b %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u16 *p, u16 v) { int result; __asm__ __volatile__(_MNEMONIC_ "h %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u32 *p, u32 v) { int result; __asm__ __volatile__(_MNEMONIC_ " %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u64 *p, u64 v) { int result; __asm__ __volatile__(_MNEMONIC_ " %w[result], %[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; }
AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(StoreExclusive, "stxr")
AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(StoreReleaseExclusive, "stlxr")
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION
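/* Conversions between T and its storage type: integers are static_cast, pointers go through reinterpret_cast (with a constexpr-safe path for nullptr), and anything else is bit_cast. */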
template<UsableAtomicType T>
constexpr ALWAYS_INLINE T ConvertToTypeForAtomic(AtomicStorage<T> s) {
if constexpr (std::integral<T>) {
return static_cast<T>(s);
} else if constexpr (std::is_pointer<T>::value) {
return reinterpret_cast<T>(s);
} else {
return std::bit_cast<T>(s);
}
}
template<UsableAtomicType T>
constexpr ALWAYS_INLINE AtomicStorage<T> ConvertToStorageForAtomic(T arg) {
if constexpr (std::integral<T>) {
return static_cast<AtomicStorage<T>>(arg);
} else if constexpr (std::is_pointer<T>::value) {
if (std::is_constant_evaluated() && arg == nullptr) {
return 0;
}
return reinterpret_cast<AtomicStorage<T>>(arg);
} else {
return std::bit_cast<AtomicStorage<T>>(arg);
}
}
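/* A relaxed load or store compiles to a plain volatile access; any stronger order uses ldar/stlr. */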
template<std::memory_order Order, typename StorageType>
ALWAYS_INLINE StorageType AtomicLoadImpl(volatile StorageType * const p) {
if constexpr (Order != std::memory_order_relaxed) {
return ::ams::util::impl::LoadAcquireForAtomic(p);
} else {
return *p;
}
}
template<std::memory_order Order, typename StorageType>
ALWAYS_INLINE void AtomicStoreImpl(volatile StorageType * const p, const StorageType s) {
if constexpr (Order != std::memory_order_relaxed) {
::ams::util::impl::StoreReleaseForAtomic(p, s);
} else {
*p = s;
}
}
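/* Pick the exclusive-load flavor for a memory order: any order with acquire semantics (consume, acquire, acq_rel, seq_cst) uses ldaxr, while relaxed and release-only orders need no acquire on the load side and use ldxr. */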
template<std::memory_order Order, typename StorageType>
ALWAYS_INLINE StorageType LoadExclusiveForAtomicByMemoryOrder(volatile StorageType * const p) {
if constexpr (Order == std::memory_order_relaxed) {
return ::ams::util::impl::LoadExclusiveForAtomic(p);
} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
return ::ams::util::impl::LoadAcquireExclusiveForAtomic(p);
} else if constexpr (Order == std::memory_order_release) {
return ::ams::util::impl::LoadExclusiveForAtomic(p);
} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
return ::ams::util::impl::LoadAcquireExclusiveForAtomic(p);
} else {
static_assert(Order != Order, "Invalid memory order"); /* Dependent false, so the assert only fires if this branch is instantiated. */
}
}
template<std::memory_order Order, typename StorageType>
ALWAYS_INLINE bool StoreExclusiveForAtomicByMemoryOrder(volatile StorageType * const p, const StorageType s) {
if constexpr (Order == std::memory_order_relaxed) {
return ::ams::util::impl::StoreExclusiveForAtomic(p, s);
} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
return ::ams::util::impl::StoreExclusiveForAtomic(p, s);
} else if constexpr (Order == std::memory_order_release) {
return ::ams::util::impl::StoreReleaseExclusiveForAtomic(p, s);
} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
return ::ams::util::impl::StoreReleaseExclusiveForAtomic(p, s);
} else {
static_assert(Order != Order, "Invalid memory order");
}
}
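/* Exchange is a load-linked/store-conditional loop: reload and re-store until the store-exclusive succeeds, then return the previously observed value. */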
template<std::memory_order Order, typename StorageType>
ALWAYS_INLINE StorageType AtomicExchangeImpl(volatile StorageType * const p, const StorageType s) {
StorageType current;
do {
current = ::ams::util::impl::LoadExclusiveForAtomicByMemoryOrder<Order>(p);
} while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomicByMemoryOrder<Order>(p, s)));
return current;
}
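/* The weak compare-exchange makes a single ldxr/stxr attempt, so it may fail spuriously if the exclusive monitor is lost; the strong variant retries until failure can only mean a genuine value mismatch. On mismatch, both clear the monitor and write the observed value back to expected, matching std::atomic behavior. */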
template<std::memory_order Order, typename T>
ALWAYS_INLINE bool AtomicCompareExchangeWeakImpl(volatile AtomicStorage<T> * const p, T &expected, T desired) {
const AtomicStorage<T> e = ::ams::util::impl::ConvertToStorageForAtomic(expected);
const AtomicStorage<T> d = ::ams::util::impl::ConvertToStorageForAtomic(desired);
const AtomicStorage<T> current = ::ams::util::impl::LoadExclusiveForAtomicByMemoryOrder<Order>(p);
if (AMS_UNLIKELY(current != e)) {
impl::ClearExclusiveForAtomic();
expected = ::ams::util::impl::ConvertToTypeForAtomic<T>(current);
return false;
}
return AMS_LIKELY(impl::StoreExclusiveForAtomicByMemoryOrder<Order>(p, d));
}
template<std::memory_order Order, typename T>
ALWAYS_INLINE bool AtomicCompareExchangeStrongImpl(volatile AtomicStorage<T> * const p, T &expected, T desired) {
const AtomicStorage<T> e = ::ams::util::impl::ConvertToStorageForAtomic(expected);
const AtomicStorage<T> d = ::ams::util::impl::ConvertToStorageForAtomic(desired);
do {
if (const AtomicStorage<T> current = ::ams::util::impl::LoadExclusiveForAtomicByMemoryOrder<Order>(p); AMS_UNLIKELY(current != e)) {
impl::ClearExclusiveForAtomic();
expected = ::ams::util::impl::ConvertToTypeForAtomic<T>(current);
return false;
}
} while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomicByMemoryOrder<Order>(p, d)));
return true;
}
}
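/* Illustrative usage sketch (not part of this header; g_counter is a hypothetical name):
 *
 *     ams::util::Atomic<u32> g_counter(0);
 *     g_counter.FetchAdd(1);
 *     const u32 seen = g_counter.Load<std::memory_order_acquire>();
 *
 *     u32 expected = g_counter.Load();
 *     while (!g_counter.CompareExchangeWeak(expected, expected + 1)) {
 *         // On failure, expected was refreshed with the observed value; just retry.
 *     }
 */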
template<impl::UsableAtomicType T>
class Atomic {
NON_COPYABLE(Atomic);
NON_MOVEABLE(Atomic);
private:
using StorageType = impl::AtomicStorage<T>;
static constexpr bool IsIntegral = std::integral<T>;
static constexpr bool IsPointer = std::is_pointer<T>::value;
static constexpr bool HasArithmeticFunctions = IsIntegral || IsPointer;
using DifferenceType = typename std::conditional<IsIntegral, T, typename std::conditional<IsPointer, std::ptrdiff_t, void>::type>::type;
static constexpr ALWAYS_INLINE T ConvertToType(StorageType s) {
return impl::ConvertToTypeForAtomic<T>(s);
}
static constexpr ALWAYS_INLINE StorageType ConvertToStorage(T arg) {
return impl::ConvertToStorageForAtomic<T>(arg);
}
private:
StorageType m_v;
private:
ALWAYS_INLINE volatile StorageType *GetStoragePointer() { return reinterpret_cast<volatile StorageType *>(std::addressof(m_v)); }
ALWAYS_INLINE const volatile StorageType *GetStoragePointer() const { return reinterpret_cast<const volatile StorageType *>(std::addressof(m_v)); }
public:
ALWAYS_INLINE Atomic() { /* ... */ }
constexpr ALWAYS_INLINE Atomic(T v) : m_v(ConvertToStorage(v)) { /* ... */ }
constexpr ALWAYS_INLINE T operator=(T desired) {
if (std::is_constant_evaluated()) {
m_v = ConvertToStorage(desired);
} else {
this->Store(desired);
}
return desired;
}
ALWAYS_INLINE operator T() const { return this->Load(); }
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Load() const {
return ConvertToType(impl::AtomicLoadImpl<Order>(this->GetStoragePointer()));
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE void Store(T arg) {
return impl::AtomicStoreImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg));
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Exchange(T arg) {
return ConvertToType(impl::AtomicExchangeImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg)));
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) {
return impl::AtomicCompareExchangeWeakImpl<Order>(this->GetStoragePointer(), expected, desired);
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) {
return impl::AtomicCompareExchangeStrongImpl<Order>(this->GetStoragePointer(), expected, desired);
}
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATOR_, _POINTER_ALLOWED_) \
template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
ALWAYS_INLINE T Fetch ## _OPERATION_(DifferenceType arg) { \
static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
volatile StorageType * const p = this->GetStoragePointer(); \
\
StorageType current; \
do { \
current = impl::LoadAcquireExclusiveForAtomic(p); \
} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, ConvertToStorage(ConvertToType(current) _OPERATOR_ arg)))); \
return ConvertToType(current); \
} \
\
template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
ALWAYS_INLINE T operator _OPERATOR_##=(DifferenceType arg) { \
static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
return this->Fetch ## _OPERATION_(arg) _OPERATOR_ arg; \
}
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, +, true)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, -, true)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, &, false)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, |, false)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, ^, false)
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator++() { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1) + 1; }
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator++(int) { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1); }
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator--() { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1) - 1; }
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator--(int) { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1); }
};
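/* AtomicRef provides atomic access to an existing T that it does not own, in the spirit of std::atomic_ref: the referent must be aligned to RequiredAlignment and must not be accessed non-atomically while any AtomicRef to it is live. */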
template<impl::UsableAtomicType T>
class AtomicRef {
NON_MOVEABLE(AtomicRef);
public:
static constexpr size_t RequiredAlignment = std::max(sizeof(T), alignof(T));
private:
using StorageType = impl::AtomicStorage<T>;
static_assert(sizeof(StorageType) == sizeof(T));
static_assert(alignof(StorageType) >= alignof(T));
static constexpr bool IsIntegral = std::integral<T>;
static constexpr bool IsPointer = std::is_pointer<T>::value;
static constexpr bool HasArithmeticFunctions = IsIntegral || IsPointer;
using DifferenceType = typename std::conditional<IsIntegral, T, typename std::conditional<IsPointer, std::ptrdiff_t, void>::type>::type;
static constexpr ALWAYS_INLINE T ConvertToType(StorageType s) {
return impl::ConvertToTypeForAtomic<T>(s);
}
static constexpr ALWAYS_INLINE StorageType ConvertToStorage(T arg) {
return impl::ConvertToStorageForAtomic<T>(arg);
}
private:
volatile StorageType * const m_p;
private:
ALWAYS_INLINE volatile StorageType *GetStoragePointer() const { return m_p; }
public:
explicit ALWAYS_INLINE AtomicRef(T &t) : m_p(reinterpret_cast<volatile StorageType *>(std::addressof(t))) { /* ... */ }
ALWAYS_INLINE AtomicRef(const AtomicRef &) noexcept = default;
AtomicRef() = delete;
AtomicRef &operator=(const AtomicRef &) = delete;
ALWAYS_INLINE T operator=(T desired) const { this->Store(desired); return desired; }
ALWAYS_INLINE operator T() const { return this->Load(); }
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Load() const {
return ConvertToType(impl::AtomicLoadImpl<Order>(this->GetStoragePointer()));
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE void Store(T arg) const {
return impl::AtomicStoreImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg));
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE T Exchange(T arg) const {
return ConvertToType(impl::AtomicExchangeImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg)));
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) const {
return impl::AtomicCompareExchangeWeakImpl<Order>(this->GetStoragePointer(), expected, desired);
}
template<std::memory_order Order = std::memory_order_seq_cst>
ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) const {
return impl::AtomicCompareExchangeStrongImpl<Order>(this->GetStoragePointer(), expected, desired);
}
#define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATOR_, _POINTER_ALLOWED_) \
template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
ALWAYS_INLINE T Fetch ## _OPERATION_(DifferenceType arg) const { \
static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
volatile StorageType * const p = this->GetStoragePointer(); \
\
StorageType current; \
do { \
current = impl::LoadAcquireExclusiveForAtomic(p); \
} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, ConvertToStorage(ConvertToType(current) _OPERATOR_ arg)))); \
return ConvertToType(current); \
} \
\
template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
ALWAYS_INLINE T operator _OPERATOR_##=(DifferenceType arg) const { \
static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
return this->Fetch ## _OPERATION_(arg) _OPERATOR_ arg; \
}
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, +, true)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, -, true)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, &, false)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, |, false)
AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, ^, false)
#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator++() const { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1) + 1; }
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator++(int) const { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1); }
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator--() const { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1) - 1; }
template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
ALWAYS_INLINE T operator--(int) const { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1); }
};
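/* Illustrative usage sketch (not part of this header; value is a hypothetical name):
 *
 *     alignas(ams::util::AtomicRef<u64>::RequiredAlignment) u64 value = 0;
 *
 *     ams::util::AtomicRef<u64> ref(value);
 *     ref.FetchAdd(5);
 *     const u64 old = ref.Exchange(0);
 */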
}