/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <stratosphere/fs/fs_common.hpp>

namespace ams::fs {

    /* ACCURATE_TO_VERSION: Unknown */
    using AllocateFunction = void *(*)(size_t);
    using DeallocateFunction = void (*)(void *, size_t);

    void SetAllocator(AllocateFunction allocator, DeallocateFunction deallocator);
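
    /* Illustrative sketch (not part of this header): a process might route the fs allocator
     * through its own heap during initialization. The function names below are hypothetical;
     * any pair matching AllocateFunction and DeallocateFunction can be registered, and it
     * would typically be done before any fs allocation takes place.
     *
     *     void *ExampleAllocate(size_t size) { return std::malloc(size); }
     *     void ExampleDeallocate(void *ptr, size_t size) { AMS_UNUSED(size); std::free(ptr); }
     *
     *     void InitializeFsAllocator() {
     *         ams::fs::SetAllocator(ExampleAllocate, ExampleDeallocate);
     *     }
     */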

    namespace impl {

        class Newable;

        void *Allocate(size_t size);
        void Deallocate(void *ptr, size_t size);

        void LockAllocatorMutex();
        void UnlockAllocatorMutex();

        void *AllocateUnsafe(size_t size);
        void DeallocateUnsafe(void *ptr, size_t size);

        class AllocatorImpl {
            public:
                static ALWAYS_INLINE void *Allocate(size_t size) { return ::ams::fs::impl::Allocate(size); }
                static ALWAYS_INLINE void *AllocateUnsafe(size_t size) { return ::ams::fs::impl::AllocateUnsafe(size); }

                static ALWAYS_INLINE void Deallocate(void *ptr, size_t size) { return ::ams::fs::impl::Deallocate(ptr, size); }
                static ALWAYS_INLINE void DeallocateUnsafe(void *ptr, size_t size) { return ::ams::fs::impl::DeallocateUnsafe(ptr, size); }

                static ALWAYS_INLINE void LockAllocatorMutex() { return ::ams::fs::impl::LockAllocatorMutex(); }
                static ALWAYS_INLINE void UnlockAllocatorMutex() { return ::ams::fs::impl::UnlockAllocatorMutex(); }
        };

        template<typename T, typename Impl, bool AllocateWhileLocked>
        class AllocatorTemplate : public std::allocator<T> {
            public:
                template<typename U>
                struct rebind {
                    using other = AllocatorTemplate<U, Impl, AllocateWhileLocked>;
                };
            private:
                bool m_allocation_failed;
            private:
                static ALWAYS_INLINE T *AllocateImpl(::std::size_t n) {
                    if constexpr (AllocateWhileLocked) {
                        auto * const p = Impl::AllocateUnsafe(sizeof(T) * n);
                        Impl::UnlockAllocatorMutex();
                        return static_cast<T *>(p);
                    } else {
                        return static_cast<T *>(Impl::Allocate(sizeof(T) * n));
                    }
                }
            public:
                AllocatorTemplate() : m_allocation_failed(false) { /* ... */ }

                template<typename U>
                AllocatorTemplate(const AllocatorTemplate<U, Impl, AllocateWhileLocked> &rhs) : m_allocation_failed(rhs.IsAllocationFailed()) { /* ... */ }

                bool IsAllocationFailed() const { return m_allocation_failed; }

                [[nodiscard]] T *allocate(::std::size_t n) {
                    auto * const p = AllocateImpl(n);
                    if (AMS_UNLIKELY(p == nullptr) && n) {
                        m_allocation_failed = true;
                    }
                    return p;
                }

                void deallocate(T *p, ::std::size_t n) {
                    Impl::Deallocate(p, sizeof(T) * n);
                }
        };
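
        /* Illustrative sketch: direct use of the allocator and its failure flag. This is an
         * implementation detail normally consumed via AllocateSharedImpl below; the element
         * count here is arbitrary.
         *
         *     AllocatorTemplate<int, AllocatorImpl, false> allocator;
         *     int * const p = allocator.allocate(16);
         *     if (p != nullptr) {
         *         // ... use p[0] through p[15] ...
         *         allocator.deallocate(p, 16);
         *     }
         *     const bool failed = allocator.IsAllocationFailed();
         */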

        template<typename T, typename Impl>
        using AllocatorTemplateForAllocateShared = AllocatorTemplate<T, Impl, true>;
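
        /* Note on AllocateSharedImpl below: 0x80 + sizeof(T) is a stated overestimate of what
         * std::allocate_shared will request (the object plus, presumably, the shared_ptr control
         * block and allocator bookkeeping). The probe allocation is made and released while the
         * allocator mutex is held, and the allocator handed to std::allocate_shared uses
         * AllocateWhileLocked = true, so the real allocation is performed under that same lock
         * before it is released. */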

        template<typename T, template<typename, typename> class AllocatorTemplateT, typename Impl, typename... Args>
        std::shared_ptr<T> AllocateSharedImpl(Args &&... args) {
            /* Try to allocate. */
            {
                /* Acquire exclusive access to the allocator. */
                Impl::LockAllocatorMutex();

                /* Check that we can allocate memory (using overestimate of 0x80 + sizeof(T)). */
                if (auto * const p = Impl::AllocateUnsafe(0x80 + sizeof(T)); AMS_LIKELY(p != nullptr)) {
                    /* Free the memory we allocated. */
                    Impl::DeallocateUnsafe(p, 0x80 + sizeof(T));

                    /* Get allocator type. */
                    using AllocatorType = AllocatorTemplateT<T, Impl>;

                    /* Allocate the shared pointer. */
                    return std::allocate_shared<T>(AllocatorType{}, std::forward<Args>(args)...);
                } else {
                    /* We can't allocate. */
                    Impl::UnlockAllocatorMutex();
                }
            }

            /* We failed. */
            return nullptr;
        }

        class Deleter {
            private:
                size_t m_size;
            public:
                Deleter() : m_size() { /* ... */ }
                explicit Deleter(size_t sz) : m_size(sz) { /* ... */ }

                void operator()(void *ptr) const {
                    ::ams::fs::impl::Deallocate(ptr, m_size);
                }
        };
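
        /* Illustrative sketch: Deleter remembers the size originally passed to Allocate, so a
         * unique_ptr that owns fs-allocated memory can return the full region via Deallocate.
         * The buffer size below is arbitrary.
         *
         *     constexpr size_t BufferSize = 0x100;
         *     std::unique_ptr<char[], Deleter> buffer(
         *         static_cast<char *>(::ams::fs::impl::Allocate(BufferSize)), Deleter(BufferSize));
         */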

        template<typename T>
        auto MakeUnique() {
            /* Check that we're not using MakeUnique unnecessarily. */
            static_assert(!std::derived_from<T, ::ams::fs::impl::Newable>);

            return std::unique_ptr<T, Deleter>(static_cast<T *>(::ams::fs::impl::Allocate(sizeof(T))), Deleter(sizeof(T)));
        }
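
        /* Illustrative usage (type is hypothetical): the returned storage comes straight from
         * Allocate and is not constructed, so this is best suited to trivial types; the Deleter
         * returns exactly sizeof(T) on destruction.
         *
         *     struct ExampleHeader { u32 magic; u32 version; };
         *
         *     auto header = ::ams::fs::impl::MakeUnique<ExampleHeader>();
         *     if (header != nullptr) {
         *         header->magic   = 0;
         *         header->version = 0;
         *     }
         */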

        template<typename ArrayT>
        auto MakeUnique(size_t size) {
            using T = typename std::remove_extent<ArrayT>::type;

            static_assert(util::is_pod<ArrayT>::value);
            static_assert(std::is_array<ArrayT>::value);

            /* Check that we're not using MakeUnique unnecessarily. */
            static_assert(!std::derived_from<T, ::ams::fs::impl::Newable>);

            using ReturnType = std::unique_ptr<ArrayT, Deleter>;

            const size_t alloc_size = sizeof(T) * size;

            return ReturnType(static_cast<T *>(::ams::fs::impl::Allocate(alloc_size)), Deleter(alloc_size));
        }
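
        /* Illustrative usage: allocate a runtime-sized POD array; the Deleter captures
         * sizeof(T) * size so the whole region is freed on destruction. The length below is
         * arbitrary.
         *
         *     auto buffer = ::ams::fs::impl::MakeUnique<char[]>(0x300);
         *     if (buffer != nullptr) {
         *         buffer[0] = '\0';
         *     }
         */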

    }

    template<typename T, typename... Args>
    std::shared_ptr<T> AllocateShared(Args &&... args) {
        return ::ams::fs::impl::AllocateSharedImpl<T, ::ams::fs::impl::AllocatorTemplateForAllocateShared, ::ams::fs::impl::AllocatorImpl>(std::forward<Args>(args)...);
    }
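
    /* Illustrative usage (type is hypothetical): construct a shared object through the fs
     * allocator. A null result indicates the allocator could not satisfy the request.
     *
     *     class ExampleCacheEntry {
     *         private:
     *             s64 m_id;
     *         public:
     *             explicit ExampleCacheEntry(s64 id) : m_id(id) { }
     *     };
     *
     *     std::shared_ptr<ExampleCacheEntry> entry = ams::fs::AllocateShared<ExampleCacheEntry>(17);
     *     if (entry == nullptr) {
     *         // Handle allocation failure.
     *     }
     */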

}