Mirror of https://github.com/Atmosphere-NX/Atmosphere, synced 2024-11-09 22:56:35 +00:00
fs: implement newer allocator templates
This commit is contained in:
parent ace409ccec
commit 64b4cc25fc
3 changed files with 144 additions and 16 deletions
@@ -28,6 +28,94 @@ namespace ams::fs {
         void *Allocate(size_t size);
         void Deallocate(void *ptr, size_t size);
 
+        void LockAllocatorMutex();
+        void UnlockAllocatorMutex();
+
+        void *AllocateUnsafe(size_t size);
+        void DeallocateUnsafe(void *ptr, size_t size);
+
+        class AllocatorImpl {
+            public:
+                static ALWAYS_INLINE void *Allocate(size_t size) { return ::ams::fs::impl::Allocate(size); }
+                static ALWAYS_INLINE void *AllocateUnsafe(size_t size) { return ::ams::fs::impl::AllocateUnsafe(size); }
+
+                static ALWAYS_INLINE void Deallocate(void *ptr, size_t size) { return ::ams::fs::impl::Deallocate(ptr, size); }
+                static ALWAYS_INLINE void DeallocateUnsafe(void *ptr, size_t size) { return ::ams::fs::impl::DeallocateUnsafe(ptr, size); }
+
+                static ALWAYS_INLINE void LockAllocatorMutex() { return ::ams::fs::impl::LockAllocatorMutex(); }
+                static ALWAYS_INLINE void UnlockAllocatorMutex() { return ::ams::fs::impl::UnlockAllocatorMutex(); }
+        };
+
+        template<typename T, typename Impl, bool AllocateWhileLocked>
+        class AllocatorTemplate : public std::allocator<T> {
+            public:
+                template<typename U>
+                struct rebind {
+                    using other = AllocatorTemplate<U, Impl, AllocateWhileLocked>;
+                };
+            private:
+                bool m_allocation_failed;
+            private:
+                static ALWAYS_INLINE T *AllocateImpl(::std::size_t n) {
+                    if constexpr (AllocateWhileLocked) {
+                        auto * const p = Impl::AllocateUnsafe(sizeof(T) * n);
+                        Impl::UnlockAllocatorMutex();
+                        return static_cast<T *>(p);
+                    } else {
+                        return static_cast<T *>(Impl::Allocate(sizeof(T) * n));
+                    }
+                }
+            public:
+                AllocatorTemplate() : m_allocation_failed(false) { /* ... */ }
+
+                template<typename U>
+                AllocatorTemplate(const AllocatorTemplate<U, Impl, AllocateWhileLocked> &rhs) : m_allocation_failed(rhs.IsAllocationFailed()) { /* ... */ }
+
+                bool IsAllocationFailed() const { return m_allocation_failed; }
+
+                [[nodiscard]] T *allocate(::std::size_t n) {
+                    auto * const p = AllocateImpl(n);
+                    if (AMS_UNLIKELY(p == nullptr) && n) {
+                        m_allocation_failed = true;
+                    }
+                    return p;
+                }
+
+                void deallocate(T *p, ::std::size_t n) {
+                    Impl::Deallocate(p, sizeof(T) * n);
+                }
+        };
+
+        template<typename T, typename Impl>
+        using AllocatorTemplateForAllocateShared = AllocatorTemplate<T, Impl, true>;
+
+        template<typename T, template<typename, typename> class AllocatorTemplateT, typename Impl, typename... Args>
+        std::shared_ptr<T> AllocateSharedImpl(Args &&... args) {
+            /* Try to allocate. */
+            {
+                /* Acquire exclusive access to the allocator. */
+                Impl::LockAllocatorMutex();
+
+                /* Check that we can allocate memory (using overestimate of 0x80 + sizeof(T)). */
+                if (auto * const p = Impl::AllocateUnsafe(0x80 + sizeof(T)); AMS_LIKELY(p != nullptr)) {
+                    /* Free the memory we allocated. */
+                    Impl::DeallocateUnsafe(p, 0x80 + sizeof(T));
+
+                    /* Get allocator type. */
+                    using AllocatorType = AllocatorTemplateT<T, Impl>;
+
+                    /* Allocate the shared pointer. */
+                    return std::allocate_shared<T>(AllocatorType{}, std::forward<Args>(args)...);
+                } else {
+                    /* We can't allocate. */
+                    Impl::UnlockAllocatorMutex();
+                }
+            }
+
+            /* We failed. */
+            return nullptr;
+        }
+
         class Deleter {
             private:
                 size_t m_size;

@@ -59,4 +147,9 @@ namespace ams::fs {
 
     }
 
+    template<typename T, typename... Args>
+    std::shared_ptr<T> AllocateShared(Args &&... args) {
+        return ::ams::fs::impl::AllocateSharedImpl<T, ::ams::fs::impl::AllocatorTemplateForAllocateShared, ::ams::fs::impl::AllocatorImpl>(std::forward<Args>(args)...);
+    }
+
 }
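The AllocateWhileLocked = true path above is the subtle part of the new header: AllocateSharedImpl takes the allocator mutex, probes that the heap can satisfy an overestimate of the request, and then relies on the allocator template's allocate() to perform the one real allocation and release the mutex. Below is a standalone reduction of that hand-off, a sketch only: it substitutes std::mutex and std::malloc for the fs globals, and every name in it (sketch, Impl, LockedAllocator, g_allocator_mutex, ...) is invented for illustration rather than taken from Atmosphere.

#include <cstddef>
#include <cstdlib>
#include <memory>
#include <mutex>
#include <utility>

namespace sketch {

    /* Stand-ins for the fs allocator globals. */
    std::mutex g_allocator_mutex;

    struct Impl {
        static void LockAllocatorMutex()   { g_allocator_mutex.lock(); }
        static void UnlockAllocatorMutex() { g_allocator_mutex.unlock(); }
        static void *AllocateUnsafe(std::size_t size) { return std::malloc(size); }
        static void DeallocateUnsafe(void *p, std::size_t size) { static_cast<void>(size); std::free(p); }
    };

    /* Counterpart of AllocatorTemplate<T, Impl, true>: allocate() is entered with the
     * mutex already held and releases it once the single allocation has been made. */
    template<typename T>
    class LockedAllocator {
        public:
            using value_type = T;

            LockedAllocator() = default;
            template<typename U> LockedAllocator(const LockedAllocator<U> &) { }

            T *allocate(std::size_t n) {
                auto * const p = Impl::AllocateUnsafe(sizeof(T) * n);
                Impl::UnlockAllocatorMutex();
                return static_cast<T *>(p);
            }

            void deallocate(T *p, std::size_t n) {
                std::scoped_lock lk(g_allocator_mutex);
                Impl::DeallocateUnsafe(p, sizeof(T) * n);
            }
    };

    /* Counterpart of AllocateSharedImpl: lock, probe with an overestimate of object plus
     * control block, then let allocate_shared's single real allocation run under the same
     * lock; LockedAllocator::allocate is what finally releases it. */
    template<typename T, typename... Args>
    std::shared_ptr<T> AllocateShared(Args &&... args) {
        Impl::LockAllocatorMutex();

        if (auto * const probe = Impl::AllocateUnsafe(0x80 + sizeof(T)); probe != nullptr) {
            /* The probe succeeded, so the allocation below is expected to succeed as well. */
            Impl::DeallocateUnsafe(probe, 0x80 + sizeof(T));
            return std::allocate_shared<T>(LockedAllocator<T>{}, std::forward<Args>(args)...);
        }

        /* The probe failed: release the mutex ourselves and report failure as nullptr. */
        Impl::UnlockAllocatorMutex();
        return nullptr;
    }

}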
@@ -19,7 +19,7 @@ namespace ams::fs {
 
     namespace {
 
-        bool g_used_default_allocator;
+        constinit bool g_used_default_allocator = false;
 
         void *DefaultAllocate(size_t size) {
             g_used_default_allocator = true;

@@ -31,7 +31,7 @@ namespace ams::fs {
             ams::Free(ptr);
         }
 
-        constinit os::SdkMutex g_lock;
+        constinit os::SdkMutex g_mutex;
         constinit AllocateFunction g_allocate_func = DefaultAllocate;
         constinit DeallocateFunction g_deallocate_func = DefaultDeallocate;
 

@@ -59,26 +59,61 @@ namespace ams::fs {
 
     namespace impl {
 
-        void *Allocate(size_t size) {
-            void *ptr;
-            {
-                std::scoped_lock lk(g_lock);
-                ptr = g_allocate_func(size);
-                if (!util::IsAligned(reinterpret_cast<uintptr_t>(ptr), RequiredAlignment)) {
-                    R_ABORT_UNLESS(fs::ResultAllocatorAlignmentViolation());
-                }
-            }
-            return ptr;
-        }
+        void LockAllocatorMutex() {
+            g_mutex.Lock();
+        }
+
+        void UnlockAllocatorMutex() {
+            g_mutex.Unlock();
+        }
+
+        void *AllocateUnsafe(size_t size) {
+            /* Check pre-conditions. */
+            AMS_ASSERT(g_mutex.IsLockedByCurrentThread());
+
+            /* Allocate. */
+            void * const ptr = g_allocate_func(size);
+
+            /* Check alignment. */
+            if (AMS_UNLIKELY(!util::IsAligned(reinterpret_cast<uintptr_t>(ptr), RequiredAlignment))) {
+                R_ABORT_UNLESS(fs::ResultAllocatorAlignmentViolation());
+            }
+
+            /* Return allocated pointer. */
+            return ptr;
+        }
 
-        void Deallocate(void *ptr, size_t size) {
-            if (ptr == nullptr) {
-                return;
-            }
-            std::scoped_lock lk(g_lock);
+        void DeallocateUnsafe(void *ptr, size_t size) {
+            /* Check pre-conditions. */
+            AMS_ASSERT(g_mutex.IsLockedByCurrentThread());
+
+            /* Deallocate the pointer. */
             g_deallocate_func(ptr, size);
         }
+
+        void *Allocate(size_t size) {
+            /* Check pre-conditions. */
+            AMS_ASSERT(g_allocate_func != nullptr);
+
+            /* Lock the allocator. */
+            std::scoped_lock lk(g_mutex);
+
+            return AllocateUnsafe(size);
+        }
+
+        void Deallocate(void *ptr, size_t size) {
+            /* Check pre-conditions. */
+            AMS_ASSERT(g_deallocate_func != nullptr);
+
+            /* If the pointer is non-null, deallocate it. */
+            if (ptr != nullptr) {
+                /* Lock the allocator. */
+                std::scoped_lock lk(g_mutex);
+
+                DeallocateUnsafe(ptr, size);
+            }
+        }
 
     }
 
 }
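The implementation still routes every request through the g_allocate_func / g_deallocate_func pointers, so the new locking scheme is transparent to clients that install their own heap. As context only, here is a hedged sketch of such a registration: fs::SetAllocator and the lmem exp-heap helpers are assumed from the surrounding libstratosphere code rather than from this diff, and the heap size and all example names are made up. Whatever allocator is installed must return pointers that pass the RequiredAlignment check in AllocateUnsafe.

#include <stratosphere.hpp>

namespace ams::example {

    namespace {

        /* Hypothetical backing storage for the fs heap; 0x80000 (512 KB) is an arbitrary choice. */
        constinit u8 g_fs_heap_memory[0x80000];
        constinit lmem::HeapHandle g_fs_heap_handle = nullptr;

        void *AllocateForFs(size_t size) {
            return lmem::AllocateFromExpHeap(g_fs_heap_handle, size);
        }

        void DeallocateForFs(void *p, size_t size) {
            AMS_UNUSED(size);
            lmem::FreeToExpHeap(g_fs_heap_handle, p);
        }

    }

    void InitializeFsHeap() {
        /* Create a thread-safe expanded heap and route fs allocations through it. */
        /* This should run before any fs allocation, so DefaultAllocate is never used. */
        g_fs_heap_handle = lmem::CreateExpHeap(g_fs_heap_memory, sizeof(g_fs_heap_memory), lmem::CreateOption_ThreadSafe);
        fs::SetAllocator(AllocateForFs, DeallocateForFs);
    }

}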
@@ -352,7 +352,7 @@ namespace ams::fssystem {
             R_UNLESS(storage_size > 0, fs::ResultInvalidNcaHeader());
 
             /* Allocate a substorage. */
-            *out = AllocateShared<DerivedStorageHolder<fs::SubStorage, 0>>(m_reader->GetBodyStorage(), storage_offset, storage_size, m_reader);
+            *out = fssystem::AllocateShared<DerivedStorageHolder<fs::SubStorage, 0>>(m_reader->GetBodyStorage(), storage_offset, storage_size, m_reader);
             R_UNLESS(*out != nullptr, fs::ResultAllocationFailureInAllocateShared());
 
             return ResultSuccess();
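The call site above turns a null result into fs::ResultAllocationFailureInAllocateShared rather than throwing. The same pattern applies to the fs::AllocateShared helper added in the header above; a minimal hedged usage sketch follows, in which WorkerContext, CreateWorkerContext, and the worker id are hypothetical, while the result values come from the diff itself.

#include <stratosphere.hpp>

namespace ams::example {

    /* Hypothetical payload type; constructor arguments are forwarded by AllocateShared. */
    struct WorkerContext {
        u64 id;
        explicit WorkerContext(u64 i) : id(i) { /* ... */ }
    };

    Result CreateWorkerContext(std::shared_ptr<WorkerContext> *out, u64 id) {
        /* Allocate through the fs allocator; nullptr signals that the allocator is exhausted. */
        auto ctx = fs::AllocateShared<WorkerContext>(id);
        R_UNLESS(ctx != nullptr, fs::ResultAllocationFailureInAllocateShared());

        *out = std::move(ctx);
        return ResultSuccess();
    }

}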