diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_i_buffer_manager.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_i_buffer_manager.hpp
new file mode 100644
index 000000000..a7a3b814d
--- /dev/null
+++ b/libraries/libstratosphere/include/stratosphere/fs/fs_i_buffer_manager.hpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+
+namespace ams::fs {
+
+ class IBufferManager {
+ public:
+ class BufferAttribute {
+ private:
+ s32 m_level;
+ public:
+ constexpr BufferAttribute() : m_level(0) { /* ... */ }
+ constexpr explicit BufferAttribute(s32 l) : m_level(l) { /* ... */ }
+
+ constexpr s32 GetLevel() const { return m_level; }
+ };
+
+ using CacheHandle = u64;
+
+ static constexpr s32 BufferLevelMin = 0;
+
+ using MemoryRange = std::pair<uintptr_t, size_t>;
+
+ static constexpr ALWAYS_INLINE MemoryRange MakeMemoryRange(uintptr_t address, size_t size) { return MemoryRange(address, size); }
+ public:
+ virtual ~IBufferManager() { /* ... */ }
+
+ ALWAYS_INLINE const MemoryRange AllocateBuffer(size_t size, const BufferAttribute &attr) {
+ return this->DoAllocateBuffer(size, attr);
+ }
+
+ ALWAYS_INLINE const MemoryRange AllocateBuffer(size_t size) {
+ return this->DoAllocateBuffer(size, BufferAttribute());
+ }
+
+ ALWAYS_INLINE void DeallocateBuffer(uintptr_t address, size_t size) {
+ return this->DoDeallocateBuffer(address, size);
+ }
+
+ ALWAYS_INLINE void DeallocateBuffer(const MemoryRange &memory_range) {
+ return this->DoDeallocateBuffer(memory_range.first, memory_range.second);
+ }
+
+ ALWAYS_INLINE CacheHandle RegisterCache(uintptr_t address, size_t size, const BufferAttribute &attr) {
+ return this->DoRegisterCache(address, size, attr);
+ }
+
+ ALWAYS_INLINE CacheHandle RegisterCache(const MemoryRange &memory_range, const BufferAttribute &attr) {
+ return this->DoRegisterCache(memory_range.first, memory_range.second, attr);
+ }
+
+ ALWAYS_INLINE const std::pair<uintptr_t, size_t> AcquireCache(CacheHandle handle) {
+ return this->DoAcquireCache(handle);
+ }
+
+ ALWAYS_INLINE size_t GetTotalSize() const {
+ return this->DoGetTotalSize();
+ }
+
+ ALWAYS_INLINE size_t GetFreeSize() const {
+ return this->DoGetFreeSize();
+ }
+
+ ALWAYS_INLINE size_t GetTotalAllocatableSize() const {
+ return this->DoGetTotalAllocatableSize();
+ }
+
+ ALWAYS_INLINE size_t GetFreeSizePeak() const {
+ return this->DoGetFreeSizePeak();
+ }
+
+ ALWAYS_INLINE size_t GetTotalAllocatableSizePeak() const {
+ return this->DoGetTotalAllocatableSizePeak();
+ }
+
+ ALWAYS_INLINE size_t GetRetriedCount() const {
+ return this->DoGetRetriedCount();
+ }
+
+ ALWAYS_INLINE void ClearPeak() {
+ return this->DoClearPeak();
+ }
+ protected:
+ virtual const MemoryRange DoAllocateBuffer(size_t size, const BufferAttribute &attr) = 0;
+
+ virtual void DoDeallocateBuffer(uintptr_t address, size_t size) = 0;
+
+ virtual CacheHandle DoRegisterCache(uintptr_t address, size_t size, const BufferAttribute &attr) = 0;
+
+ virtual const MemoryRange DoAcquireCache(CacheHandle handle) = 0;
+
+ virtual size_t DoGetTotalSize() const = 0;
+
+ virtual size_t DoGetFreeSize() const = 0;
+
+ virtual size_t DoGetTotalAllocatableSize() const = 0;
+
+ virtual size_t DoGetFreeSizePeak() const = 0;
+
+ virtual size_t DoGetTotalAllocatableSizePeak() const = 0;
+
+ virtual size_t DoGetRetriedCount() const = 0;
+
+ virtual void DoClearPeak() = 0;
+ };
+
+}
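
A minimal usage sketch of the relocated interface (illustrative only, outside the patch; the helper name and buffer size are hypothetical, and only the API declared above is assumed):

namespace ams::fs {

    /* Hypothetical sketch: allocate a buffer, register it as reusable cache, and reacquire it later. */
    inline void BufferManagerUsageSketch(IBufferManager *manager) {
        /* Allocate a 64 KiB working buffer at the minimum buffer level. */
        const IBufferManager::MemoryRange range = manager->AllocateBuffer(64 * 1024, IBufferManager::BufferAttribute(IBufferManager::BufferLevelMin));

        /* Allocation can fail under memory pressure; a null address signals failure. */
        if (range.first == 0) {
            return;
        }

        /* Return the memory to the manager as cache, keeping a handle to it. */
        const IBufferManager::CacheHandle handle = manager->RegisterCache(range, IBufferManager::BufferAttribute());

        /* Later, try to reacquire the cached memory; the manager may have evicted it to satisfy other allocations. */
        if (const auto reacquired = manager->AcquireCache(handle); reacquired.first != 0) {
            manager->DeallocateBuffer(reacquired.first, reacquired.second);
        }
    }

}
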
diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/fscreator/fssrv_storage_on_nca_creator.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/fscreator/fssrv_storage_on_nca_creator.hpp
index 9b3b46935..973cfff3e 100644
--- a/libraries/libstratosphere/include/stratosphere/fssrv/fscreator/fssrv_storage_on_nca_creator.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssrv/fscreator/fssrv_storage_on_nca_creator.hpp
@@ -16,7 +16,7 @@
#pragma once
#include
#include
-#include <stratosphere/fssystem/buffers/fssystem_i_buffer_manager.hpp>
+#include <stratosphere/fs/fs_i_buffer_manager.hpp>
#include
namespace ams::fssystem {
@@ -35,10 +35,10 @@ namespace ams::fssrv::fscreator {
MemoryResource *m_allocator;
const fssystem::NcaCryptoConfiguration &m_nca_crypto_cfg;
const fssystem::NcaCompressionConfiguration &m_nca_compression_cfg;
- fssystem::IBufferManager * const m_buffer_manager;
+ fs::IBufferManager * const m_buffer_manager;
fssystem::IHash256GeneratorFactorySelector * const m_hash_generator_factory_selector;
public:
- explicit StorageOnNcaCreator(MemoryResource *mr, const fssystem::NcaCryptoConfiguration &cfg, const fssystem::NcaCompressionConfiguration &c_cfg, fssystem::IBufferManager *bm, fssystem::IHash256GeneratorFactorySelector *hgfs)
+ explicit StorageOnNcaCreator(MemoryResource *mr, const fssystem::NcaCryptoConfiguration &cfg, const fssystem::NcaCompressionConfiguration &c_cfg, fs::IBufferManager *bm, fssystem::IHash256GeneratorFactorySelector *hgfs)
: m_allocator(mr), m_nca_crypto_cfg(cfg), m_nca_compression_cfg(c_cfg), m_buffer_manager(bm), m_hash_generator_factory_selector(hgfs)
{
/* ... */
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp
index b47bdca90..928553aec 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp
@@ -16,6 +16,7 @@
#pragma once
#include
#include
+#include <stratosphere/fs/fs_i_buffer_manager.hpp>
namespace ams::fssystem::buffers {
@@ -88,13 +89,13 @@ namespace ams::fssystem::buffers {
};
template<typename IsValidBufferFunction>
- Result AllocateBufferUsingBufferManagerContext(std::pair<uintptr_t, size_t> *out, fssystem::IBufferManager *buffer_manager, size_t size, const IBufferManager::BufferAttribute attribute, IsValidBufferFunction is_valid_buffer, const char *func_name) {
+ Result AllocateBufferUsingBufferManagerContext(fs::IBufferManager::MemoryRange *out, fs::IBufferManager *buffer_manager, size_t size, const fs::IBufferManager::BufferAttribute attribute, IsValidBufferFunction is_valid_buffer, const char *func_name) {
AMS_ASSERT(out != nullptr);
AMS_ASSERT(buffer_manager != nullptr);
AMS_ASSERT(func_name != nullptr);
/* Clear the output. */
- *out = std::pair<uintptr_t, size_t>(0, 0);
+ *out = fs::IBufferManager::MakeMemoryRange(0, 0);
/* Get the context. */
auto context = GetBufferManagerContext();
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buddy_heap.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buddy_heap.hpp
index 4b236d6b8..9653b3afc 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buddy_heap.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buddy_heap.hpp
@@ -16,6 +16,7 @@
#pragma once
#include
#include
+#include
namespace ams::fssystem {
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buffer_manager.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buffer_manager.hpp
index 765acc4fe..0fc803ca6 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buffer_manager.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_file_system_buffer_manager.hpp
@@ -17,12 +17,12 @@
#include
#include
#include
-#include <stratosphere/fssystem/buffers/fssystem_i_buffer_manager.hpp>
+#include <stratosphere/fs/fs_i_buffer_manager.hpp>
#include
namespace ams::fssystem {
- class FileSystemBufferManager : public IBufferManager {
+ class FileSystemBufferManager : public fs::IBufferManager {
NON_COPYABLE(FileSystemBufferManager);
NON_MOVEABLE(FileSystemBufferManager);
public:
@@ -194,7 +194,7 @@ namespace ams::fssystem {
size_t m_peak_free_size;
size_t m_peak_total_allocatable_size;
size_t m_retried_count;
- mutable os::SdkRecursiveMutex m_mutex;
+ mutable os::SdkMutex m_mutex;
public:
static constexpr size_t QueryWorkBufferSize(s32 max_cache_count, s32 max_order) {
const auto buddy_size = FileSystemBuddyHeap::QueryWorkBufferSize(max_order);
@@ -269,27 +269,27 @@ namespace ams::fssystem {
m_cache_handle_table.Finalize();
}
private:
- virtual const std::pair<uintptr_t, size_t> AllocateBufferImpl(size_t size, const BufferAttribute &attr) override;
+ virtual const std::pair<uintptr_t, size_t> DoAllocateBuffer(size_t size, const BufferAttribute &attr) override;
- virtual void DeallocateBufferImpl(uintptr_t address, size_t size) override;
+ virtual void DoDeallocateBuffer(uintptr_t address, size_t size) override;
- virtual CacheHandle RegisterCacheImpl(uintptr_t address, size_t size, const BufferAttribute &attr) override;
+ virtual CacheHandle DoRegisterCache(uintptr_t address, size_t size, const BufferAttribute &attr) override;
- virtual const std::pair<uintptr_t, size_t> AcquireCacheImpl(CacheHandle handle) override;
+ virtual const std::pair<uintptr_t, size_t> DoAcquireCache(CacheHandle handle) override;
- virtual size_t GetTotalSizeImpl() const override;
+ virtual size_t DoGetTotalSize() const override;
- virtual size_t GetFreeSizeImpl() const override;
+ virtual size_t DoGetFreeSize() const override;
- virtual size_t GetTotalAllocatableSizeImpl() const override;
+ virtual size_t DoGetTotalAllocatableSize() const override;
- virtual size_t GetPeakFreeSizeImpl() const override;
+ virtual size_t DoGetFreeSizePeak() const override;
- virtual size_t GetPeakTotalAllocatableSizeImpl() const override;
+ virtual size_t DoGetTotalAllocatableSizePeak() const override;
- virtual size_t GetRetriedCountImpl() const override;
+ virtual size_t DoGetRetriedCount() const override;
- virtual void ClearPeakImpl() override;
+ virtual void DoClearPeak() override;
};
}
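
For reference, a minimal sketch of implementing the renamed Do* hooks (illustrative only, outside the patch; the class name is hypothetical, and it simply forwards every hook to another fs::IBufferManager):

namespace ams::fssystem {

    class ForwardingBufferManager : public fs::IBufferManager {
        private:
            fs::IBufferManager *m_impl;
        public:
            explicit ForwardingBufferManager(fs::IBufferManager *impl) : m_impl(impl) { /* ... */ }
        protected:
            virtual const MemoryRange DoAllocateBuffer(size_t size, const BufferAttribute &attr) override { return m_impl->AllocateBuffer(size, attr); }
            virtual void DoDeallocateBuffer(uintptr_t address, size_t size) override { return m_impl->DeallocateBuffer(address, size); }
            virtual CacheHandle DoRegisterCache(uintptr_t address, size_t size, const BufferAttribute &attr) override { return m_impl->RegisterCache(address, size, attr); }
            virtual const MemoryRange DoAcquireCache(CacheHandle handle) override { return m_impl->AcquireCache(handle); }
            virtual size_t DoGetTotalSize() const override { return m_impl->GetTotalSize(); }
            virtual size_t DoGetFreeSize() const override { return m_impl->GetFreeSize(); }
            virtual size_t DoGetTotalAllocatableSize() const override { return m_impl->GetTotalAllocatableSize(); }
            virtual size_t DoGetFreeSizePeak() const override { return m_impl->GetFreeSizePeak(); }
            virtual size_t DoGetTotalAllocatableSizePeak() const override { return m_impl->GetTotalAllocatableSizePeak(); }
            virtual size_t DoGetRetriedCount() const override { return m_impl->GetRetriedCount(); }
            virtual void DoClearPeak() override { return m_impl->ClearPeak(); }
    };

}

Only the Do* hooks need to be overridden; the non-virtual AllocateBuffer/RegisterCache/AcquireCache wrappers in fs_i_buffer_manager.hpp forward to them.
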
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_i_buffer_manager.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_i_buffer_manager.hpp
deleted file mode 100644
index 057fcfcc3..000000000
--- a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_i_buffer_manager.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) Atmosphère-NX
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#pragma once
-#include <vapours.hpp>
-
-namespace ams::fssystem {
-
- class IBufferManager {
- public:
- class BufferAttribute {
- private:
- s32 m_level;
- public:
- constexpr BufferAttribute() : m_level(0) { /* ... */ }
- constexpr explicit BufferAttribute(s32 l) : m_level(l) { /* ... */ }
-
- constexpr s32 GetLevel() const { return m_level; }
- };
-
- using CacheHandle = s64;
-
- static constexpr s32 BufferLevelMin = 0;
- public:
- virtual ~IBufferManager() { /* ... */ }
-
- const std::pair<uintptr_t, size_t> AllocateBuffer(size_t size, const BufferAttribute &attr) {
- return this->AllocateBufferImpl(size, attr);
- }
-
- const std::pair<uintptr_t, size_t> AllocateBuffer(size_t size) {
- return this->AllocateBufferImpl(size, BufferAttribute());
- }
-
- void DeallocateBuffer(uintptr_t address, size_t size) {
- return this->DeallocateBufferImpl(address, size);
- }
-
- CacheHandle RegisterCache(uintptr_t address, size_t size, const BufferAttribute &attr) {
- return this->RegisterCacheImpl(address, size, attr);
- }
-
- const std::pair<uintptr_t, size_t> AcquireCache(CacheHandle handle) {
- return this->AcquireCacheImpl(handle);
- }
-
- size_t GetTotalSize() const {
- return this->GetTotalSizeImpl();
- }
-
- size_t GetFreeSize() const {
- return this->GetFreeSizeImpl();
- }
-
- size_t GetTotalAllocatableSize() const {
- return this->GetTotalAllocatableSizeImpl();
- }
-
- size_t GetPeakFreeSize() const {
- return this->GetPeakFreeSizeImpl();
- }
-
- size_t GetPeakTotalAllocatableSize() const {
- return this->GetPeakTotalAllocatableSizeImpl();
- }
-
- size_t GetRetriedCount() const {
- return this->GetRetriedCountImpl();
- }
-
- void ClearPeak() {
- return this->ClearPeakImpl();
- }
- protected:
- virtual const std::pair<uintptr_t, size_t> AllocateBufferImpl(size_t size, const BufferAttribute &attr) = 0;
-
- virtual void DeallocateBufferImpl(uintptr_t address, size_t size) = 0;
-
- virtual CacheHandle RegisterCacheImpl(uintptr_t address, size_t size, const BufferAttribute &attr) = 0;
-
- virtual const std::pair<uintptr_t, size_t> AcquireCacheImpl(CacheHandle handle) = 0;
-
- virtual size_t GetTotalSizeImpl() const = 0;
-
- virtual size_t GetFreeSizeImpl() const = 0;
-
- virtual size_t GetTotalAllocatableSizeImpl() const = 0;
-
- virtual size_t GetPeakFreeSizeImpl() const = 0;
-
- virtual size_t GetPeakTotalAllocatableSizeImpl() const = 0;
-
- virtual size_t GetRetriedCountImpl() const = 0;
-
- virtual void ClearPeakImpl() = 0;
- };
-
-}
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_aes_ctr_counter_extended_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_aes_ctr_counter_extended_storage.hpp
index 1ca5828d6..e1a7922ae 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_aes_ctr_counter_extended_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_aes_ctr_counter_extended_storage.hpp
@@ -94,22 +94,27 @@ namespace ams::fssystem {
virtual Result GetSize(s64 *out) override {
AMS_ASSERT(out != nullptr);
- *out = m_table.GetSize();
- return ResultSuccess();
+
+ BucketTree::Offsets offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(offsets)));
+
+ *out = offsets.end_offset;
+
+ R_SUCCEED();
}
virtual Result Flush() override {
- return ResultSuccess();
+ R_SUCCEED();
}
virtual Result Write(s64 offset, const void *buffer, size_t size) override {
AMS_UNUSED(offset, buffer, size);
- return fs::ResultUnsupportedOperationInAesCtrCounterExtendedStorageA();
+ R_THROW(fs::ResultUnsupportedOperationInAesCtrCounterExtendedStorageA());
}
virtual Result SetSize(s64 size) override {
AMS_UNUSED(size);
- return fs::ResultUnsupportedOperationInAesCtrCounterExtendedStorageB();
+ R_THROW(fs::ResultUnsupportedOperationInAesCtrCounterExtendedStorageB());
}
private:
Result Initialize(IAllocator *allocator, const void *key, size_t key_size, u32 secure_value, fs::SubStorage data_storage, fs::SubStorage table_storage);
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree.hpp
index a289c2b19..f64399242 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree.hpp
@@ -53,6 +53,29 @@ namespace ams::fssystem {
static_assert(util::is_pod<NodeHeader>::value);
static_assert(sizeof(NodeHeader) == 0x10);
+ struct Offsets {
+ s64 start_offset;
+ s64 end_offset;
+
+ constexpr bool IsInclude(s64 offset) const {
+ return this->start_offset <= offset && offset < this->end_offset;
+ }
+
+ constexpr bool IsInclude(s64 offset, s64 size) const {
+ return size > 0 && this->start_offset <= offset && size <= (this->end_offset - offset);
+ }
+ };
+ static_assert(util::is_pod<Offsets>::value);
+ static_assert(sizeof(Offsets) == 0x10);
+
+ struct OffsetCache {
+ Offsets offsets;
+ os::SdkMutex mutex;
+ bool is_initialized;
+
+ constexpr OffsetCache() : offsets{ -1, -1 }, mutex(), is_initialized(false) { /* ... */ }
+ };
+
class ContinuousReadingInfo {
private:
size_t m_read_size;
@@ -213,10 +236,9 @@ namespace ams::fssystem {
s32 m_entry_count;
s32 m_offset_count;
s32 m_entry_set_count;
- s64 m_start_offset;
- s64 m_end_offset;
+ OffsetCache m_offset_cache;
public:
- BucketTree() : m_node_storage(), m_entry_storage(), m_node_l1(), m_node_size(), m_entry_size(), m_entry_count(), m_offset_count(), m_entry_set_count(), m_start_offset(), m_end_offset() { /* ... */ }
+ BucketTree() : m_node_storage(), m_entry_storage(), m_node_l1(), m_node_size(), m_entry_size(), m_entry_count(), m_offset_count(), m_entry_set_count(), m_offset_cache() { /* ... */ }
~BucketTree() { this->Finalize(); }
Result Initialize(IAllocator *allocator, fs::SubStorage node_storage, fs::SubStorage entry_storage, size_t node_size, size_t entry_size, s32 entry_count);
@@ -226,22 +248,19 @@ namespace ams::fssystem {
bool IsInitialized() const { return m_node_size > 0; }
bool IsEmpty() const { return m_entry_size == 0; }
- Result Find(Visitor *visitor, s64 virtual_address) const;
+ Result Find(Visitor *visitor, s64 virtual_address);
Result InvalidateCache();
s32 GetEntryCount() const { return m_entry_count; }
IAllocator *GetAllocator() const { return m_node_l1.GetAllocator(); }
- s64 GetStart() const { return m_start_offset; }
- s64 GetEnd() const { return m_end_offset; }
- s64 GetSize() const { return m_end_offset - m_start_offset; }
+ Result GetOffsets(Offsets *out) {
+ /* Ensure we have an offset cache. */
+ R_TRY(this->EnsureOffsetCache());
- bool Includes(s64 offset) const {
- return m_start_offset <= offset && offset < m_end_offset;
- }
-
- bool Includes(s64 offset, s64 size) const {
- return size > 0 && m_start_offset <= offset && size <= m_end_offset - offset;
+ /* Set the output. */
+ *out = m_offset_cache.offsets;
+ R_SUCCEED();
}
private:
template<typename EntryType>
@@ -250,6 +269,7 @@ namespace ams::fssystem {
size_t size;
NodeHeader entry_set;
s32 entry_index;
+ Offsets offsets;
EntryType entry;
};
private:
@@ -262,6 +282,8 @@ namespace ams::fssystem {
s64 GetEntrySetIndex(s32 node_index, s32 offset_index) const {
return (m_offset_count - m_node_l1->count) + (m_offset_count * node_index) + offset_index;
}
+
+ Result EnsureOffsetCache();
};
class BucketTree::Visitor {
@@ -283,6 +305,7 @@ namespace ams::fssystem {
static_assert(util::is_pod<EntrySetHeader>::value);
private:
const BucketTree *m_tree;
+ BucketTree::Offsets m_offsets;
void *m_entry;
s32 m_entry_index;
s32 m_entry_set_count;
@@ -314,7 +337,7 @@ namespace ams::fssystem {
const BucketTree *GetTree() const { return m_tree; }
private:
- Result Initialize(const BucketTree *tree);
+ Result Initialize(const BucketTree *tree, const BucketTree::Offsets &offsets);
Result Find(s64 virtual_address);
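
A small sketch of how callers migrate from the removed GetStart()/GetEnd()/Includes() accessors to the new Result-based GetOffsets() (illustrative only, outside the patch; the function name is hypothetical):

namespace ams::fssystem {

    /* Hypothetical helper demonstrating the new offsets API. */
    Result QueryBucketTreeRangeSketch(BucketTree &tree, s64 offset, s64 size) {
        /* Getting the offsets may now fail, because the cache is populated lazily. */
        BucketTree::Offsets offsets;
        R_TRY(tree.GetOffsets(std::addressof(offsets)));

        /* The old Includes(offset, size) check becomes Offsets::IsInclude(offset, size). */
        R_UNLESS(offsets.IsInclude(offset, size), fs::ResultOutOfRange());

        /* The old GetSize() is now the difference of the cached offsets. */
        const s64 tree_size = offsets.end_offset - offsets.start_offset;
        AMS_ASSERT(tree_size >= 0);
        AMS_UNUSED(tree_size);

        R_SUCCEED();
    }

}

The Result-based accessor reflects that the offsets now live in an OffsetCache populated lazily via EnsureOffsetCache() under the cache's mutex, which is also why Find() loses its const qualifier.
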
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree_template_impl.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree_template_impl.hpp
index 3dc024a09..069a8a8de 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree_template_impl.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_bucket_tree_template_impl.hpp
@@ -47,10 +47,15 @@ namespace ams::fssystem {
PooledBuffer pool(m_node_size, 1);
char *buffer = nullptr;
+ s64 entry_storage_size;
+ R_TRY(m_entry_storage.GetSize(std::addressof(entry_storage_size)));
+
/* Read the node. */
if (m_node_size <= pool.GetSize()) {
buffer = pool.GetBuffer();
const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
+ R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size), fs::ResultInvalidBucketTreeNodeEntryCount());
+
R_TRY(m_entry_storage.Read(ofs, buffer, m_node_size));
}
@@ -59,9 +64,9 @@ namespace ams::fssystem {
s64 phys_offset = entry.GetPhysicalOffset();
/* Start merge tracking. */
- s64 merge_size = 0;
+ s64 merge_size = 0;
s64 readable_size = 0;
- bool merged = false;
+ bool merged = false;
/* Iterate. */
auto entry_index = param.entry_index;
@@ -89,7 +94,7 @@ namespace ams::fssystem {
}
next_entry_offset = next_entry.GetVirtualOffset();
- R_UNLESS(this->Includes(next_entry_offset), fs::ResultInvalidIndirectEntryOffset());
+ R_UNLESS(param.offsets.IsInclude(next_entry_offset), fs::ResultInvalidIndirectEntryOffset());
} else {
next_entry_offset = param.entry_set.offset;
}
@@ -103,7 +108,7 @@ namespace ams::fssystem {
/* Determine how much data we should read. */
const auto remaining_size = end_offset - cur_offset;
- const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
+ const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
AMS_ASSERT(read_size <= param.size);
/* Update our merge tracking. */
@@ -156,6 +161,7 @@ namespace ams::fssystem {
ContinuousReadingParam param = {
offset, size, m_entry_set.header, m_entry_index
};
+ std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets), sizeof(BucketTree::Offsets));
std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));
/* Scan. */
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compressed_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compressed_storage.hpp
index a9e5461b7..749ddec81 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compressed_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compressed_storage.hpp
@@ -18,6 +18,8 @@
#include
#include
#include
+#include
+#include
namespace ams::fssystem {
@@ -50,51 +52,1387 @@ namespace ams::fssystem {
return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
}
private:
- /* TODO: CompressedStorageCore m_core; */
- /* TODO: CacheManager m_cache_manager; */
+ class CompressedStorageCore {
+ NON_COPYABLE(CompressedStorageCore);
+ NON_MOVEABLE(CompressedStorageCore);
+ private:
+ size_t m_block_size_max;
+ size_t m_continuous_reading_size_max;
+ BucketTree m_table;
+ fs::SubStorage m_data_storage;
+ GetDecompressorFunction m_get_decompressor_function;
+ public:
+ CompressedStorageCore() : m_table(), m_data_storage() { /* ... */ }
+
+ ~CompressedStorageCore() {
+ this->Finalize();
+ }
+ public:
+ Result Initialize(MemoryResource *bktr_allocator, fs::SubStorage data_storage, fs::SubStorage node_storage, fs::SubStorage entry_storage, s32 bktr_entry_count, size_t block_size_max, size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(bktr_allocator != nullptr);
+ AMS_ASSERT(0 < block_size_max);
+ AMS_ASSERT(block_size_max <= continuous_reading_size_max);
+ AMS_ASSERT(get_decompressor != nullptr);
+
+ /* Initialize our entry table. */
+ R_TRY(m_table.Initialize(bktr_allocator, node_storage, entry_storage, NodeSize, sizeof(Entry), bktr_entry_count));
+
+ /* Set our other fields. */
+ m_block_size_max = block_size_max;
+ m_continuous_reading_size_max = continuous_reading_size_max;
+ m_data_storage = data_storage;
+ m_get_decompressor_function = get_decompressor;
+
+ R_SUCCEED();
+ }
+
+ void Finalize() {
+ if (this->IsInitialized()) {
+ m_table.Finalize();
+ m_data_storage = fs::SubStorage();
+ }
+ }
+
+ fs::IStorage *GetDataStorage() { return std::addressof(m_data_storage); }
+
+ Result GetDataStorageSize(s64 *out) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(out != nullptr);
+
+ /* Get size. */
+ R_RETURN(m_data_storage.GetSize(out));
+ }
+
+ BucketTree &GetEntryTable() { return m_table; }
+
+ Result GetEntryList(Entry *out_entries, s32 *out_read_count, s32 max_entry_count, s64 offset, s64 size) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(offset >= 0);
+ AMS_ASSERT(size >= 0);
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Check that we can output the count. */
+ R_UNLESS(out_read_count != nullptr, fs::ResultNullptrArgument());
+
+ /* Check that we have anything to read at all. */
+ R_SUCCEED_IF(size == 0);
+
+ /* Check that either we have a buffer, or this is to determine how many we need. */
+ if (max_entry_count != 0) {
+ R_UNLESS(out_entries != nullptr, fs::ResultNullptrArgument());
+ }
+
+ /* Get the table offsets. */
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ /* Validate arguments. */
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
+
+ /* Find the offset in our tree. */
+ BucketTree::Visitor visitor;
+ R_TRY(m_table.Find(std::addressof(visitor), offset));
+ {
+ const auto entry_offset = visitor.Get()->virt_offset;
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), fs::ResultUnexpectedInCompressedStorageA());
+ }
+
+ /* Get the entries. */
+ const auto end_offset = offset + size;
+ s32 read_count = 0;
+ while (visitor.Get()->virt_offset < end_offset) {
+ /* If we should be setting the output, do so. */
+ if (max_entry_count != 0) {
+ /* Ensure we only read as many entries as we can. */
+ if (read_count >= max_entry_count) {
+ break;
+ }
+
+ /* Set the current output entry. */
+ out_entries[read_count] = *visitor.Get();
+ }
+
+ /* Increase the read count. */
+ ++read_count;
+
+ /* If we're at the end, we're done. */
+ if (!visitor.CanMoveNext()) {
+ break;
+ }
+
+ /* Move to the next entry. */
+ R_TRY(visitor.MoveNext());
+ }
+
+ /* Set the output read count. */
+ *out_read_count = read_count;
+ R_SUCCEED();
+ }
+
+ Result GetSize(s64 *out) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(out != nullptr);
+
+ /* Get our table offsets. */
+ BucketTree::Offsets offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(offsets)));
+
+ /* Set the output. */
+ *out = offsets.end_offset;
+ R_SUCCEED();
+ }
+
+ Result Invalidate() {
+ /* Invalidate our entry table. */
+ R_TRY(m_table.InvalidateCache());
+
+ /* Invalidate our data storage. */
+ R_TRY(m_data_storage.OperateRange(fs::OperationId::Invalidate, 0, std::numeric_limits<s64>::max()));
+
+ R_SUCCEED();
+ }
+
+ Result OperatePerEntry(s64 offset, s64 size, auto f) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(offset >= 0);
+ AMS_ASSERT(size >= 0);
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Succeed if there's nothing to operate on. */
+ R_SUCCEED_IF(size == 0);
+
+ /* Get the table offsets. */
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ /* Validate arguments. */
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
+
+ /* Find the offset in our tree. */
+ BucketTree::Visitor visitor;
+ R_TRY(m_table.Find(std::addressof(visitor), offset));
+ {
+ const auto entry_offset = visitor.Get()->virt_offset;
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), fs::ResultUnexpectedInCompressedStorageA());
+ }
+
+ /* Prepare to operate in chunks. */
+ auto cur_offset = offset;
+ const auto end_offset = offset + static_cast(size);
+
+ while (cur_offset < end_offset) {
+ /* Get the current entry. */
+ const auto cur_entry = *visitor.Get();
+
+ /* Get and validate the entry's offset. */
+ const auto cur_entry_offset = cur_entry.virt_offset;
+ R_UNLESS(cur_entry_offset <= cur_offset, fs::ResultUnexpectedInCompressedStorageA());
+
+ /* Get and validate the next entry offset. */
+ s64 next_entry_offset;
+ if (visitor.CanMoveNext()) {
+ R_TRY(visitor.MoveNext());
+ next_entry_offset = visitor.Get()->virt_offset;
+ R_UNLESS(table_offsets.IsInclude(next_entry_offset), fs::ResultUnexpectedInCompressedStorageA());
+ } else {
+ next_entry_offset = table_offsets.end_offset;
+ }
+ R_UNLESS(cur_offset < next_entry_offset, fs::ResultUnexpectedInCompressedStorageA());
+
+ /* Get the offset of the entry in the data we read. */
+ const auto data_offset = cur_offset - cur_entry_offset;
+ const auto data_size = (next_entry_offset - cur_entry_offset);
+ AMS_ASSERT(data_size > 0);
+
+ /* Determine how much is left. */
+ const auto remaining_size = end_offset - cur_offset;
+ const auto cur_size = std::min(remaining_size, data_size - data_offset);
+ AMS_ASSERT(cur_size <= size);
+
+ /* Get the data storage size. */
+ s64 storage_size = 0;
+ R_TRY(m_data_storage.GetSize(std::addressof(storage_size)));
+
+ /* Check that our read remains naively physically in bounds. */
+ R_UNLESS(0 <= cur_entry.phys_offset && cur_entry.phys_offset <= storage_size, fs::ResultUnexpectedInCompressedStorageC());
+
+ /* If we have any compression, verify that we remain physically in bounds. */
+ if (cur_entry.compression_type != CompressionType_None) {
+ R_UNLESS(cur_entry.phys_offset + cur_entry.GetPhysicalSize() <= storage_size, fs::ResultUnexpectedInCompressedStorageC());
+ }
+
+ /* Check that block alignment requirements are met. */
+ if (CompressionTypeUtility::IsBlockAlignmentRequired(cur_entry.compression_type)) {
+ R_UNLESS(util::IsAligned(cur_entry.phys_offset, CompressionBlockAlignment), fs::ResultUnexpectedInCompressedStorageA());
+ }
+
+ /* Invoke the operator. */
+ bool is_continuous = true;
+ R_TRY(f(std::addressof(is_continuous), cur_entry, data_size, data_offset, cur_size));
+
+ /* If not continuous, we're done. */
+ if (!is_continuous) {
+ break;
+ }
+
+ /* Advance. */
+ cur_offset += cur_size;
+ }
+
+ R_SUCCEED();
+ }
+
+ Result OperateRange(s64 offset, s64 size, auto f) {
+ /* Get the table offsets. */
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ /* Validate arguments. */
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
+
+ /* If our table is empty, we have nothing to operate on. */
+ R_SUCCEED_IF(m_table.IsEmpty());
+
+ /* Operate on the range. */
+ s64 required_access_physical_offset = 0;
+ s64 required_access_physical_size = 0;
+ R_TRY(this->OperatePerEntry(offset, size, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 read_size) -> Result {
+ AMS_UNUSED(virtual_data_size);
+
+ /* Determine the physical extents. */
+ s64 physical_offset, physical_size;
+ if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
+ physical_offset = entry.phys_offset + data_offset;
+ physical_size = read_size;
+ } else {
+ physical_offset = entry.phys_offset;
+ physical_size = entry.GetPhysicalSize();
+ }
+
+ /* If we have a pending data storage operation, perform it if we have to. */
+ const s64 required_access_physical_end = required_access_physical_offset + required_access_physical_size;
+ if (required_access_physical_size > 0) {
+ /* Check that we can coalesce this operation with the previous one; if we can't, we need to perform it. */
+ if (!(required_access_physical_end <= physical_offset && physical_offset <= util::AlignUp(required_access_physical_end, CompressionBlockAlignment))) {
+ R_TRY(f(required_access_physical_offset, required_access_physical_size));
+
+ required_access_physical_size = 0;
+ }
+ }
+
+ /* If we need to access the data storage, update our storage access parameters. */
+ if (CompressionTypeUtility::IsDataStorageAccessRequired(entry.compression_type)) {
+ /* Update the required access parameters. */
+ if (required_access_physical_size > 0) {
+ required_access_physical_size += physical_size + (physical_offset - required_access_physical_end);
+ } else {
+ required_access_physical_offset = physical_offset;
+ required_access_physical_size = physical_size;
+ }
+ } else {
+ /* Verify that we're allowed to be operating on the non-data-storage-access type. */
+ R_UNLESS(entry.compression_type == CompressionType_Zeros, fs::ResultUnexpectedInCompressedStorageB());
+ }
+
+ /* We're always continuous. */
+ *out_continuous = true;
+ R_SUCCEED();
+ }));
+
+ /* If we have a pending operation, perform it. */
+ if (required_access_physical_size > 0) {
+ R_TRY(f(required_access_physical_offset, required_access_physical_size));
+ }
+
+ R_SUCCEED();
+ }
+
+ Result QueryAppropriateOffsetForAsynchronousAccess(s64 *out, s64 offset, s64 access_size, s64 alignment_size) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(offset >= 0);
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Check that we can write to the output. */
+ R_UNLESS(out != nullptr, fs::ResultNullptrArgument());
+
+ /* Get the table offsets. */
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ /* Validate arguments. */
+ R_UNLESS(table_offsets.IsInclude(offset, 1), fs::ResultOutOfRange());
+
+ /* Operate on the range. */
+ s64 required_access_physical_offset = 0;
+ s64 required_access_physical_size = 0;
+ s64 required_access_physical_end;
+
+ s64 appropriate_virtual_offset = offset;
+ R_TRY(this->OperatePerEntry(offset, table_offsets.end_offset - offset, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 read_size) -> Result {
+ /* Determine the physical extents. */
+ s64 physical_offset, physical_size;
+ if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
+ physical_offset = entry.phys_offset + data_offset;
+ physical_size = read_size;
+ } else {
+ physical_offset = entry.phys_offset;
+ physical_size = entry.GetPhysicalSize();
+ }
+
+ /* If we don't need to access the data storage, update our storage access parameters simply. */
+ if (!CompressionTypeUtility::IsDataStorageAccessRequired(entry.compression_type)) {
+ /* Verify that we're allowed to be operating on the non-data-storage-access type. */
+ R_UNLESS(entry.compression_type == CompressionType_Zeros, fs::ResultUnexpectedInCompressedStorageB());
+
+ /* No access is required, so we can advance the offset freely. */
+ appropriate_virtual_offset += read_size;
+
+ /* A read to zeros is always continuous. */
+ *out_continuous = true;
+ R_SUCCEED();
+ }
+
+ /* Update the required access parameters. */
+ if (required_access_physical_size > 0) {
+ /* Check that we can coalesce this operation with the previous one; if we can't, we need to account for the gap. */
+ if ((required_access_physical_end <= physical_offset && physical_offset <= util::AlignUp(required_access_physical_end, CompressionBlockAlignment))) {
+ const s64 gap_size = physical_offset - required_access_physical_end;
+
+ if (required_access_physical_size + gap_size > access_size) {
+ *out_continuous = false;
+ R_SUCCEED();
+ }
+
+ required_access_physical_size += gap_size;
+ }
+ } else {
+ required_access_physical_offset = physical_offset;
+ }
+
+ /* If we're within the access bounds, we want to continue on. */
+ if (physical_size + required_access_physical_size <= access_size) {
+ required_access_physical_size += physical_size;
+ required_access_physical_end = physical_offset + physical_size;
+
+ appropriate_virtual_offset += read_size;
+ *out_continuous = true;
+ R_SUCCEED();
+ }
+
+ /* We're no longer within the access bounds, so we won't be continuous. */
+ *out_continuous = false;
+
+ /* Ensure we account for block alignment. */
+ if (CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type)) {
+ if (appropriate_virtual_offset == offset) {
+ appropriate_virtual_offset += read_size;
+ access_size = std::max(access_size, read_size);
+ }
+ } else {
+ /* Get the default splitter. */
+ auto * const default_splitter = fssystem::IAsynchronousAccessSplitter::GetDefaultAsynchronousAccessSplitter();
+
+ /* Query for an appropriate offset. */
+ s64 appropriate_physical_offset = 0;
+ R_TRY(default_splitter->QueryAppropriateOffset(std::addressof(appropriate_physical_offset), physical_offset, access_size - required_access_physical_size, alignment_size));
+
+ /* Use it, if we should. */
+ if (const auto gap_size = appropriate_physical_offset - physical_offset; gap_size > 0) {
+ appropriate_virtual_offset += gap_size;
+ required_access_physical_size += gap_size;
+ }
+ }
+
+ R_SUCCEED();
+ }));
+
+ /* Check that the offset is actually appropriate. */
+ AMS_ASSERT(offset <= appropriate_virtual_offset && appropriate_virtual_offset <= table_offsets.end_offset);
+ AMS_ASSERT(0 <= required_access_physical_size && required_access_physical_size <= access_size);
+
+ /* Set the output. */
+ *out = appropriate_virtual_offset;
+ R_SUCCEED();
+ }
+
+ Result QueryRange(void *dst, size_t dst_size, s64 offset, s64 size) {
+ /* Check arguments. */
+ R_UNLESS(dst != nullptr, fs::ResultNullptrArgument());
+ R_UNLESS(dst_size == sizeof(fs::QueryRangeInfo), fs::ResultInvalidArgument());
+
+ /* If we have nothing to query, succeed immediately. */
+ R_SUCCEED_IF(size <= 0);
+
+ /* Operate on the range. */
+ fs::QueryRangeInfo full_info;
+ full_info.Clear();
+
+ R_TRY(this->OperateRange(offset, size, [&](s64 offset, s64 size) -> Result {
+ /* Operate on our data storage. */
+ fs::QueryRangeInfo cur_info;
+ R_TRY(m_data_storage.OperateRange(std::addressof(cur_info), sizeof(cur_info), fs::OperationId::QueryRange, offset, size, nullptr, 0));
+
+ /* Merge the info. */
+ full_info.Merge(cur_info);
+ R_SUCCEED();
+ }));
+
+ R_SUCCEED();
+ }
+ public:
+ using ReadImplFunction = util::IFunction<Result (void *, size_t)>;
+ using ReadFunction = util::IFunction<Result (size_t, const ReadImplFunction &)>;
+ public:
+ Result Read(s64 offset, s64 size, const ReadFunction &read_func) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(offset >= 0);
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Succeed immediately, if we have nothing to read. */
+ R_SUCCEED_IF(size == 0);
+
+ /* Declare read lambda. */
+ constexpr int EntriesCountMax = 0x80;
+ struct Entries {
+ CompressionType compression_type;
+ u32 gap_from_prev;
+ u32 physical_size;
+ u32 virtual_size;
+ };
+ Entries entries[EntriesCountMax];
+ s32 entry_count = 0;
+ Entry prev_entry = { .virt_offset = -1, };
+ bool will_allocate_pooled_buffer = false;
+ s64 required_access_physical_offset = 0;
+ s64 required_access_physical_size = 0;
+
+ auto PerformRequiredRead = [&]() -> Result {
+ /* If there are no entries, we have nothing to do. */
+ R_SUCCEED_IF(entry_count == 0);
+
+ /* Get the remaining size in a convenient form. */
+ const size_t total_required_size = static_cast<size_t>(required_access_physical_size);
+
+ /* Perform the read based on whether we need to allocate a buffer. */
+ if (will_allocate_pooled_buffer) {
+ /* Allocate a pooled buffer. */
+ fssystem::PooledBuffer pooled_buffer;
+ if (pooled_buffer.GetAllocatableSizeMax() >= total_required_size) {
+ pooled_buffer.Allocate(total_required_size, m_block_size_max);
+ } else {
+ pooled_buffer.AllocateParticularlyLarge(std::min(total_required_size, PooledBuffer::GetAllocatableParticularlyLargeSizeMax()), m_block_size_max);
+ }
+
+ /* Read each of the entries. */
+ for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
+ /* Determine the current read size. */
+ bool will_use_pooled_buffer = false;
+ const size_t cur_read_size = [&] ALWAYS_INLINE_LAMBDA () -> size_t {
+ if (const size_t target_entry_size = static_cast<size_t>(entries[entry_idx].physical_size) + static_cast<size_t>(entries[entry_idx].gap_from_prev); target_entry_size <= pooled_buffer.GetSize()) {
+ /* We'll be using the pooled buffer. */
+ will_use_pooled_buffer = true;
+
+ /* Determine how much we can read. */
+ const size_t max_size = std::min(static_cast<size_t>(required_access_physical_size), pooled_buffer.GetSize());
+
+ size_t read_size = 0;
+ for (auto n = entry_idx; n < entry_count; ++n) {
+ const size_t cur_entry_size = static_cast<size_t>(entries[n].physical_size) + static_cast<size_t>(entries[n].gap_from_prev);
+ if (read_size + cur_entry_size > max_size) {
+ break;
+ }
+
+ read_size += cur_entry_size;
+ }
+
+ return read_size;
+ } else {
+ /* If we don't fit, we must be uncompressed. */
+ AMS_ASSERT(entries[entry_idx].compression_type == CompressionType_None);
+
+ /* We can perform the whole of an uncompressed read directly. */
+ return entries[entry_idx].virtual_size;
+ }
+ }();
+
+ /* Perform the read based on whether or not we'll use the pooled buffer. */
+ if (will_use_pooled_buffer) {
+ /* Read the compressed data into the pooled buffer. */
+ auto * const buffer = pooled_buffer.GetBuffer();
+ R_TRY(m_data_storage.Read(required_access_physical_offset, buffer, cur_read_size));
+
+ /* Temporarily increase our thread priority, while we decompress the data. */
+ ScopedThreadPriorityChanger cp(+1, ScopedThreadPriorityChanger::Mode::Relative);
+
+ /* Decompress the data. */
+ size_t buffer_offset;
+ for (buffer_offset = 0; entry_idx < entry_count && ((static_cast<size_t>(entries[entry_idx].physical_size) + static_cast<size_t>(entries[entry_idx].gap_from_prev)) == 0 || buffer_offset < cur_read_size); buffer_offset += entries[entry_idx++].physical_size) {
+ /* Advance by the relevant gap. */
+ buffer_offset += entries[entry_idx].gap_from_prev;
+
+ const auto compression_type = entries[entry_idx].compression_type;
+ switch (compression_type) {
+ case CompressionType_None:
+ {
+ /* Check that we can remain within bounds. */
+ AMS_ASSERT(buffer_offset + entries[entry_idx].virtual_size <= cur_read_size);
+
+ /* Perform no decompression. */
+ R_TRY(read_func(entries[entry_idx].virtual_size, util::MakeIFunction([&] (void *dst, size_t dst_size) -> Result {
+ /* Check that the size is valid. */
+ AMS_ASSERT(dst_size == entries[entry_idx].virtual_size);
+ AMS_UNUSED(dst_size);
+
+ /* We have no compression, so just copy the data out. */
+ std::memcpy(dst, buffer + buffer_offset, entries[entry_idx].virtual_size);
+ R_SUCCEED();
+ })));
+ }
+ break;
+ case CompressionType_Zeros:
+ {
+ /* Check that we can remain within bounds. */
+ AMS_ASSERT(buffer_offset <= cur_read_size);
+
+ /* Zero the memory. */
+ R_TRY(read_func(entries[entry_idx].virtual_size, util::MakeIFunction([&] (void *dst, size_t dst_size) -> Result {
+ /* Check that the size is valid. */
+ AMS_ASSERT(dst_size == entries[entry_idx].virtual_size);
+ AMS_UNUSED(dst_size);
+
+ /* The data is zeroes, so zero the buffer. */
+ std::memset(dst, 0, entries[entry_idx].virtual_size);
+ R_SUCCEED();
+ })));
+ }
+ break;
+ default:
+ {
+ /* Check that we can remain within bounds. */
+ AMS_ASSERT(buffer_offset + entries[entry_idx].physical_size <= cur_read_size);
+
+ /* Get the decompressor. */
+ const auto decompressor = this->GetDecompressor(compression_type);
+ R_UNLESS(decompressor != nullptr, fs::ResultUnexpectedInCompressedStorageB());
+
+ /* Decompress the data. */
+ R_TRY(read_func(entries[entry_idx].virtual_size, util::MakeIFunction([&] (void *dst, size_t dst_size) -> Result {
+ /* Check that the size is valid. */
+ AMS_ASSERT(dst_size == entries[entry_idx].virtual_size);
+ AMS_UNUSED(dst_size);
+
+ /* Perform the decompression. */
+ R_RETURN(decompressor(dst, entries[entry_idx].virtual_size, buffer + buffer_offset, entries[entry_idx].physical_size));
+ })));
+ }
+ break;
+ }
+ }
+
+ /* Check that we processed the correct amount of data. */
+ AMS_ASSERT(buffer_offset == cur_read_size);
+ } else {
+ /* Account for the gap from the previous entry. */
+ required_access_physical_offset += entries[entry_idx].gap_from_prev;
+ required_access_physical_size -= entries[entry_idx].gap_from_prev;
+
+ /* We don't need the buffer (as the data is uncompressed), so just execute the read. */
+ R_TRY(read_func(cur_read_size, util::MakeIFunction([&] (void *dst, size_t dst_size) -> Result {
+ /* Check that the size is valid. */
+ AMS_ASSERT(dst_size == cur_read_size);
+ AMS_UNUSED(dst_size);
+
+ /* Perform the read. */
+ R_RETURN(m_data_storage.Read(required_access_physical_offset, dst, cur_read_size));
+ })));
+ }
+
+ /* Advance on. */
+ required_access_physical_offset += cur_read_size;
+ required_access_physical_size -= cur_read_size;
+ }
+
+ /* Verify that we have nothing remaining to read. */
+ AMS_ASSERT(required_access_physical_size == 0);
+
+ R_SUCCEED();
+ } else {
+ /* We don't need a buffer, so just execute the read. */
+ R_TRY(read_func(total_required_size, util::MakeIFunction([&] (void *dst, size_t dst_size) -> Result {
+ /* Check that the size is valid. */
+ AMS_ASSERT(dst_size == total_required_size);
+ AMS_UNUSED(dst_size);
+
+ /* Perform the read. */
+ R_RETURN(m_data_storage.Read(required_access_physical_offset, dst, total_required_size));
+ })));
+ }
+
+ R_SUCCEED();
+ };
+
+ R_TRY(this->OperatePerEntry(offset, size, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 read_size) -> Result {
+ /* Determine the physical extents. */
+ s64 physical_offset, physical_size;
+ if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
+ physical_offset = entry.phys_offset + data_offset;
+ physical_size = read_size;
+ } else {
+ physical_offset = entry.phys_offset;
+ physical_size = entry.GetPhysicalSize();
+ }
+
+ /* If we have a pending data storage operation, perform it if we have to. */
+ const s64 required_access_physical_end = required_access_physical_offset + required_access_physical_size;
+ if (required_access_physical_size > 0) {
+ const bool required_by_gap = !(required_access_physical_end <= physical_offset && physical_offset <= util::AlignUp(required_access_physical_end, CompressionBlockAlignment));
+ const bool required_by_continuous_size = ((physical_size + physical_offset) - required_access_physical_end) + required_access_physical_size > m_continuous_reading_size_max;
+ const bool required_by_entry_count = entry_count == EntriesCountMax;
+ if (required_by_gap || required_by_continuous_size || required_by_entry_count) {
+ /* Check that our planned access is sane. */
+ AMS_ASSERT(!will_allocate_pooled_buffer || required_access_physical_size <= m_continuous_reading_size_max);
+
+ /* Perform the required read. */
+ R_TRY(PerformRequiredRead());
+
+ /* Reset our requirements. */
+ prev_entry.virt_offset = -1;
+ required_access_physical_size = 0;
+ entry_count = 0;
+ will_allocate_pooled_buffer = false;
+ }
+ }
+
+ /* Sanity check that we're within bounds on entries. */
+ AMS_ASSERT(entry_count < EntriesCountMax);
+
+ /* Determine if a buffer allocation is needed. */
+ if (entry.compression_type != CompressionType_None || (prev_entry.virt_offset >= 0 && entry.virt_offset - prev_entry.virt_offset != entry.phys_offset - prev_entry.phys_offset)) {
+ will_allocate_pooled_buffer = true;
+ }
+
+ /* If we need to access the data storage, update our required access parameters. */
+ if (CompressionTypeUtility::IsDataStorageAccessRequired(entry.compression_type)) {
+ /* If the data is compressed, ensure the access is sane. */
+ if (entry.compression_type != CompressionType_None) {
+ R_UNLESS(data_offset == 0, fs::ResultInvalidOffset());
+ R_UNLESS(virtual_data_size == read_size, fs::ResultInvalidSize());
+ R_UNLESS(entry.GetPhysicalSize() <= m_block_size_max, fs::ResultUnexpectedInCompressedStorageD());
+ }
+
+ /* Update the required access parameters. */
+ s64 gap_from_prev;
+ if (required_access_physical_size > 0) {
+ gap_from_prev = physical_offset - required_access_physical_end;
+ } else {
+ gap_from_prev = 0;
+ required_access_physical_offset = physical_offset;
+ }
+ required_access_physical_size += physical_size + gap_from_prev;
+
+ /* Create an entry to access the data storage. */
+ entries[entry_count++] = {
+ .compression_type = entry.compression_type,
+ .gap_from_prev = static_cast<u32>(gap_from_prev),
+ .physical_size = static_cast<u32>(physical_size),
+ .virtual_size = static_cast<u32>(read_size),
+ };
+ } else {
+ /* Verify that we're allowed to be operating on the non-data-storage-access type. */
+ R_UNLESS(entry.compression_type == CompressionType_Zeros, fs::ResultUnexpectedInCompressedStorageB());
+
+ /* If we have entries, create a fake entry for the zero region. */
+ if (entry_count != 0) {
+ /* We need to have a physical size. */
+ R_UNLESS(entry.GetPhysicalSize() != 0, fs::ResultUnexpectedInCompressedStorageD());
+
+ /* Create a fake entry. */
+ entries[entry_count++] = {
+ .compression_type = CompressionType_Zeros,
+ .gap_from_prev = 0,
+ .physical_size = 0,
+ .virtual_size = static_cast<u32>(read_size),
+ };
+ } else {
+ /* We have no entries, so we can just perform the read. */
+ R_TRY(read_func(static_cast<size_t>(read_size), util::MakeIFunction([&] (void *dst, size_t dst_size) -> Result {
+ /* Check the space we should zero is correct. */
+ AMS_ASSERT(dst_size == static_cast<size_t>(read_size));
+ AMS_UNUSED(dst_size);
+
+ /* Zero the memory. */
+ std::memset(dst, 0, read_size);
+ R_SUCCEED();
+ })));
+ }
+ }
+
+
+ /* Set the previous entry. */
+ prev_entry = entry;
+
+ /* We're continuous. */
+ *out_continuous = true;
+ R_SUCCEED();
+ }));
+
+ /* If we still have a pending access, perform it. */
+ if (required_access_physical_size != 0) {
+ R_TRY(PerformRequiredRead());
+ }
+
+ R_SUCCEED();
+ }
+ private:
+ DecompressorFunction GetDecompressor(CompressionType type) const {
+ /* Check that we can get a decompressor for the type. */
+ if (CompressionTypeUtility::IsUnknownType(type)) {
+ return nullptr;
+ }
+
+ /* Get the decompressor. */
+ return m_get_decompressor_function(type);
+ }
+
+ bool IsInitialized() const {
+ return m_table.IsInitialized();
+ }
+ };
+
+ class CacheManager {
+ NON_COPYABLE(CacheManager);
+ NON_MOVEABLE(CacheManager);
+ private:
+ struct Range {
+ s64 offset;
+ size_t size;
+
+ s64 GetEndOffset() const {
+ return this->offset + this->size;
+ }
+
+ bool IsIncluded(s64 ofs) const {
+ return this->offset <= ofs && ofs < this->GetEndOffset();
+ }
+ };
+ static_assert(util::is_pod<Range>::value);
+
+ struct CacheEntry {
+ Range range;
+ fs::IBufferManager::CacheHandle handle;
+ uintptr_t memory_address;
+ u32 memory_size;
+ bool is_valid;
+ bool is_cached;
+ u16 lru_counter;
+
+ void Invalidate() {
+ /* ... */
+ }
+
+ bool IsAllocated() const {
+ return this->is_valid && this->handle != 0;
+ }
+
+ bool IsIncluded(s64 offset) const {
+ return this->is_valid && this->range.IsIncluded(offset);
+ }
+
+ bool IsWriteBack() const {
+ return false;
+ }
+ };
+ static_assert(util::is_pod<CacheEntry>::value);
+
+ struct AccessRange {
+ s64 virtual_offset;
+ s64 virtual_size;
+ u32 physical_size;
+ bool is_block_alignment_required;
+
+ s64 GetEndVirtualOffset() const {
+ return this->virtual_offset + this->virtual_size;
+ }
+ };
+ static_assert(util::is_pod<AccessRange>::value);
+
+ using BlockCacheManager = ::ams::fssystem::impl::BlockCacheManager<CacheEntry, fs::IBufferManager>;
+ using CacheIndex = BlockCacheManager::CacheIndex;
+ private:
+ size_t m_cache_size_unk_0;
+ size_t m_cache_size_unk_1;
+ os::SdkMutex m_mutex;
+ BlockCacheManager m_block_cache_manager;
+ s64 m_storage_size = 0;
+ public:
+ CacheManager() = default;
+
+ ~CacheManager() { this->Finalize(); }
+ public:
+ Result Initialize(fs::IBufferManager *cache_allocator, s64 storage_size, size_t cache_size_0, size_t cache_size_1, size_t max_cache_entries) {
+ /* Initialize our block cache manager. */
+ R_TRY(m_block_cache_manager.Initialize(cache_allocator, max_cache_entries));
+
+ /* Set our fields. */
+ m_cache_size_unk_0 = cache_size_0;
+ m_cache_size_unk_1 = cache_size_1;
+ m_storage_size = storage_size;
+
+ R_SUCCEED();
+ }
+
+ void Finalize() {
+ /* If necessary, invalidate anything we have cached. */
+ if (m_block_cache_manager.IsInitialized()) {
+ this->Invalidate();
+ }
+
+ /* Finalize our block cache manager. */
+ m_block_cache_manager.Finalize();
+ }
+
+ void Invalidate() {
+ return m_block_cache_manager.Invalidate();
+ }
+
+ Result Read(CompressedStorageCore &core, s64 offset, void *buffer, size_t size) {
+ /* If we have nothing to read, succeed. */
+ R_SUCCEED_IF(size == 0);
+
+ /* Check that we have a buffer to read into. */
+ R_UNLESS(buffer != nullptr, fs::ResultNullptrArgument());
+
+ /* Check that the read is in bounds. */
+ R_UNLESS(offset <= m_storage_size, fs::ResultInvalidOffset());
+
+ /* Determine how much we can read. */
+ const size_t read_size = std::min(size, static_cast<size_t>(m_storage_size - offset));
+
+ /* Create head/tail ranges. */
+ AccessRange head_range = {};
+ AccessRange tail_range = {};
+ bool is_tail_set = false;
+
+ /* Operate to determine the head range. */
+ R_TRY(core.OperatePerEntry(offset, 1, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 data_read_size) -> Result {
+ AMS_UNUSED(data_offset, data_read_size);
+
+ /* Set the head range. */
+ head_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = entry.phys_size,
+ .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
+ };
+
+ /* If required, set the tail range. */
+ if ((offset + read_size) <= entry.virt_offset + virtual_data_size) {
+ tail_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = entry.phys_size,
+ .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
+ };
+ is_tail_set = true;
+ }
+
+ /* We only want to determine the head range, so we're not continuous. */
+ *out_continuous = false;
+ R_SUCCEED();
+ }));
+
+ /* If necessary, determine the tail range. */
+ if (!is_tail_set) {
+ R_TRY(core.OperatePerEntry(offset + read_size - 1, 1, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 data_read_size) -> Result {
+ AMS_UNUSED(data_offset, data_read_size);
+
+ /* Set the tail range. */
+ tail_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = entry.phys_size,
+ .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
+ };
+
+ /* We only want to determine the tail range, so we're not continuous. */
+ *out_continuous = false;
+ R_SUCCEED();
+ }));
+ }
+
+ /* Begin performing the accesses. */
+ s64 cur_offset = offset;
+ size_t cur_size = read_size;
+ char *cur_dst = static_cast<char *>(buffer);
+
+ /* If we can use the head/tail cache, do so. */
+ if (m_block_cache_manager.GetCount() > 0) {
+ /* Read the head cache. */
+ R_TRY(this->ReadHeadCache(core, cur_offset, cur_dst, cur_size, head_range, tail_range));
+
+ /* If we're now done, succeed. */
+ R_SUCCEED_IF(cur_size == 0);
+
+ /* Read the tail cache. */
+ R_TRY(this->ReadTailCache(core, cur_offset, cur_dst, cur_size, head_range, tail_range));
+
+ /* If we're now done, succeed. */
+ R_SUCCEED_IF(cur_size == 0);
+ }
+
+ /* Determine our alignment. */
+ const bool head_unaligned = head_range.is_block_alignment_required && (cur_offset != head_range.virtual_offset || cur_size < head_range.virtual_size);
+ const bool tail_unaligned = [&] ALWAYS_INLINE_LAMBDA () -> bool {
+ if (tail_range.is_block_alignment_required) {
+ if (cur_size + cur_offset == tail_range.GetEndVirtualOffset()) {
+ return false;
+ } else if (!head_unaligned) {
+ return true;
+ } else {
+ return cur_size + cur_offset < head_range.GetEndVirtualOffset();
+ }
+ } else {
+ return false;
+ }
+ }();
+
+ /* Determine start/end offsets. */
+ const s64 start_offset = head_range.is_block_alignment_required ? head_range.virtual_offset : cur_offset;
+ const s64 end_offset = tail_range.is_block_alignment_required ? tail_range.GetEndVirtualOffset() : cur_offset + cur_size;
+
+ /* Perform the read. */
+ bool is_burst_reading = false;
+ R_TRY(core.Read(start_offset, end_offset - start_offset, util::MakeIFunction([&] (size_t size_buffer_required, const CompressedStorageCore::ReadImplFunction &read_impl) -> Result {
+ /* Determine whether we're burst reading. */
+ const AccessRange *unaligned_range = nullptr;
+ if (!is_burst_reading) {
+ /* Check whether we're using head, tail, or none as unaligned. */
+ if (head_unaligned && head_range.virtual_offset <= cur_offset && cur_offset < head_range.GetEndVirtualOffset()) {
+ unaligned_range = std::addressof(head_range);
+ } else if (tail_unaligned && tail_range.virtual_offset <= cur_offset && cur_offset < tail_range.GetEndVirtualOffset()) {
+ unaligned_range = std::addressof(tail_range);
+ } else {
+ is_burst_reading = true;
+ }
+ }
+ AMS_ASSERT((is_burst_reading ^ (unaligned_range != nullptr)));
+
+ /* Perform reading by burst, or not. */
+ if (is_burst_reading) {
+ /* Check that the access is valid for burst reading. */
+ AMS_ASSERT(size_buffer_required <= cur_size);
+
+ /* Perform the read. */
+ R_TRY(read_impl(cur_dst, size_buffer_required));
+
+ /* Advance. */
+ cur_dst += size_buffer_required;
+ cur_offset += size_buffer_required;
+ cur_size -= size_buffer_required;
+
+ /* Determine whether we're going to continue burst reading. */
+ const s64 offset_aligned = tail_unaligned ? tail_range.virtual_offset : end_offset;
+ AMS_ASSERT(cur_offset <= offset_aligned);
+
+ if (offset_aligned <= cur_offset) {
+ is_burst_reading = false;
+ }
+ } else {
+ /* We're not burst reading, so we have some unaligned range. */
+ AMS_ASSERT(unaligned_range != nullptr);
+
+ /* Check that the size is correct. */
+ AMS_ASSERT(size_buffer_required == static_cast<size_t>(unaligned_range->virtual_size));
+
+ /* Get a pooled buffer for our read. */
+ fssystem::PooledBuffer pooled_buffer;
+ pooled_buffer.Allocate(size_buffer_required, size_buffer_required);
+
+ /* Perform read. */
+ R_TRY(read_impl(pooled_buffer.GetBuffer(), size_buffer_required));
+
+ /* Copy the data we read to the destination. */
+ const size_t skip_size = cur_offset - unaligned_range->virtual_offset;
+ const size_t copy_size = std::min(cur_size, static_cast<size_t>(unaligned_range->GetEndVirtualOffset() - cur_offset));
+
+ std::memcpy(cur_dst, pooled_buffer.GetBuffer() + skip_size, copy_size);
+
+ /* Advance. */
+ cur_dst += copy_size;
+ cur_offset += copy_size;
+ cur_size -= copy_size;
+
+ /* If we should, cache what we read. */
+ if (m_block_cache_manager.GetCount() > 0 && unaligned_range->physical_size > m_cache_size_unk_1) {
+ CacheEntry entry;
+ for (s64 ofs = unaligned_range->virtual_offset; ofs < unaligned_range->GetEndVirtualOffset(); ofs += entry.range.size) {
+ /* Find or allocate buffer. */
+ fs::IBufferManager::MemoryRange memory_range;
+ R_TRY(this->FindOrAllocateBuffer(std::addressof(memory_range), std::addressof(entry), ofs, unaligned_range->GetEndVirtualOffset() - ofs));
+
+ /* If not cached, cache the data. */
+ if (!entry.is_cached) {
+ std::memcpy(reinterpret_cast<void *>(memory_range.first), pooled_buffer.GetBuffer() + (ofs - unaligned_range->virtual_offset), entry.range.size);
+ entry.is_cached = true;
+ }
+
+ /* Store the associated buffer. */
+ this->StoreAssociateBuffer(memory_range, entry);
+ }
+ }
+ }
+
+ R_SUCCEED();
+ })));
+
+ R_SUCCEED();
+ }
+ private:
+ Result FindBuffer(fs::IBufferManager::MemoryRange *out, CacheEntry *out_entry, s64 offset) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
+ AMS_ASSERT(out != nullptr);
+ AMS_ASSERT(out_entry != nullptr);
+
+ /* Find the buffer. */
+ R_RETURN(this->FindBufferImpl(out, out_entry, offset));
+ }
+
+ Result FindBufferImpl(fs::IBufferManager::MemoryRange *out, CacheEntry *out_entry, s64 offset) {
+ /* Get our block cache count. */
+ const auto count = m_block_cache_manager.GetCount();
+
+ /* Try to find the buffer. */
+ CacheIndex index;
+ for (index = 0; index < count; ++index) {
+ if (const auto &buffer = m_block_cache_manager[index]; buffer.IsAllocated() && buffer.IsIncluded(offset)) {
+ break;
+ }
+ }
+
+ /* Set the output. */
+ if (index != count) {
+ /* Acquire the entry. */
+ m_block_cache_manager.AcquireCacheEntry(out_entry, out, index);
+ if (out->first == 0) {
+ *out = {};
+ *out_entry = {};
+ }
+ } else {
+ *out = {};
+ *out_entry = {};
+ }
+
+ R_SUCCEED();
+ }
+
+ Result FindOrAllocateBuffer(fs::IBufferManager::MemoryRange *out, CacheEntry *out_entry, s64 offset, size_t max_range_size) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
+ AMS_ASSERT(out != nullptr);
+ AMS_ASSERT(out_entry != nullptr);
+
+ /* Acquire exclusive access to our block cache manager. */
+ std::scoped_lock lk(m_mutex);
+
+ /* Try to find the buffer. */
+ R_TRY(this->FindBufferImpl(out, out_entry, offset));
+
+ /* Determine the range size. */
+ const size_t range_size = std::min(max_range_size, m_cache_size_unk_0);
+
+ /* If necessary, allocate. */
+ if (out->first == 0) {
+ R_TRY(fssystem::buffers::AllocateBufferUsingBufferManagerContext(out, m_block_cache_manager.GetAllocator(), range_size, fs::IBufferManager::BufferAttribute(0x20), [] (const fs::IBufferManager::MemoryRange &buffer) -> bool {
+ return buffer.first != 0;
+ }, AMS_CURRENT_FUNCTION_NAME));
+
+ /* Set the entry for the allocated buffer. */
+ out_entry->is_valid = out->first != 0;
+ out_entry->is_cached = false;
+ out_entry->handle = 0;
+ out_entry->memory_address = 0;
+ out_entry->memory_size = 0;
+ out_entry->range.offset = offset;
+ out_entry->range.size = range_size;
+ out_entry->lru_counter = 0;
+ }
+
+ /* Check that the result is valid. */
+ AMS_ASSERT(out_entry->range.size <= out->second);
+
+ R_SUCCEED();
+ }
+
+ Result ReadHeadCache(CompressedStorageCore &core, s64 &offset, char *&buffer, size_t &size, AccessRange &head_range, const AccessRange &tail_range) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(buffer != nullptr);
+
+ /* Read until we're done with the head cache. */
+ while (head_range.virtual_size > 0 && head_range.virtual_offset < tail_range.GetEndVirtualOffset()) {
+ /* Cache the access extents. */
+ s64 access_offset = offset;
+ char *access_buf = buffer;
+ size_t access_size = size;
+
+ /* Determine the current access extents. */
+ s64 cur_offset = head_range.virtual_offset + util::AlignDown(access_offset - head_range.virtual_offset, m_cache_size_unk_0);
+ while (cur_offset < head_range.GetEndVirtualOffset() && cur_offset < offset + size) {
+ /* Find the relevant entry. */
+ fs::IBufferManager::MemoryRange memory_range = {};
+ CacheEntry entry = {};
+ R_TRY(this->FindBuffer(std::addressof(memory_range), std::addressof(entry), cur_offset));
+
+ /* If the entry isn't cached, we're done. */
+ R_SUCCEED_IF(!entry.is_cached);
+
+ /* Otherwise, copy the cached data. */
+ const size_t copy_size = std::min<size_t>(access_size, entry.range.GetEndOffset() - access_offset);
+
+ std::memcpy(access_buf, reinterpret_cast<void *>(memory_range.first + access_offset - entry.range.offset), copy_size);
+
+ /* Advance. */
+ access_buf += copy_size;
+ access_offset += copy_size;
+ access_size -= copy_size;
+
+ cur_offset += entry.range.size;
+
+ /* Store the associated buffer. */
+ this->StoreAssociateBuffer(memory_range, entry);
+ }
+
+ /* Update the output extents. */
+ buffer = access_buf;
+ offset = access_offset;
+ size = access_size;
+
+ /* Determine the new head range. */
+ AccessRange new_head_range = {
+ .virtual_offset = head_range.GetEndVirtualOffset(),
+ .virtual_size = 0,
+ .physical_size = 0,
+ .is_block_alignment_required = true,
+ };
+ if (head_range.GetEndVirtualOffset() == tail_range.virtual_offset) {
+ /* We can use the tail range directly. */
+ new_head_range.virtual_size = tail_range.virtual_size;
+ new_head_range.physical_size = tail_range.physical_size;
+ new_head_range.is_block_alignment_required = tail_range.is_block_alignment_required;
+ } else if (head_range.GetEndVirtualOffset() < tail_range.GetEndVirtualOffset()) {
+ /* We need to find the new head range. */
+ R_TRY(core.OperatePerEntry(new_head_range.virtual_offset, 1, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 data_read_size) -> Result {
+ AMS_UNUSED(data_offset, data_read_size);
+
+ /* If we can, use the current entry. */
+ if (entry.virt_offset < tail_range.GetEndVirtualOffset()) {
+ new_head_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = entry.phys_size,
+ .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
+ };
+ }
+
+ /* We only want to determine the new head range, so we're not continuous. */
+ *out_continuous = false;
+ R_SUCCEED();
+ }));
+ }
+
+ /* Update the head range. */
+ head_range = new_head_range;
+ }
+
+ R_SUCCEED();
+ }
+
+ Result ReadTailCache(CompressedStorageCore &core, s64 offset, char *buffer, size_t &size, const AccessRange &head_range, AccessRange &tail_range) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(buffer != nullptr);
+
+ /* Read until we're done with the tail cache. */
+ while (tail_range.virtual_offset >= offset) {
+ /* Loop reading, while we can. */
+ const s64 dst_end_offset = offset + size;
+ s64 cur_offset = tail_range.virtual_offset;
+ while (cur_offset < dst_end_offset) {
+ /* Find the relevant entry. */
+ fs::IBufferManager::MemoryRange memory_range = {};
+ CacheEntry entry = {};
+ R_TRY(this->FindBuffer(std::addressof(memory_range), std::addressof(entry), cur_offset));
+
+ /* If the entry isn't cached, we're done. */
+ R_SUCCEED_IF(!entry.is_cached);
+
+ /* Sanity check our current access. */
+ AMS_ASSERT(offset <= entry.range.offset);
+
+ /* Copy the cached data. */
+ const s64 cur_end_offset = std::min(dst_end_offset, entry.range.GetEndOffset());
+
+ std::memcpy(buffer + entry.range.offset - offset, reinterpret_cast<void *>(memory_range.first), cur_end_offset - entry.range.offset);
+
+ /* Advance. */
+ cur_offset += entry.range.size;
+
+ /* Store the associated buffer. */
+ this->StoreAssociateBuffer(memory_range, entry);
+ }
+
+ /* Update the output extents. */
+ size -= std::min(dst_end_offset, tail_range.GetEndVirtualOffset()) - tail_range.virtual_offset;
+
+ /* Update the tail range. */
+ bool new_tail_found = false;
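+ /* N.B. Operating on (tail_range.virtual_offset - 1) looks up the entry containing the byte just before the current tail, walking the entry table backwards one entry at a time. */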
+ if (tail_range.virtual_offset - 1 >= 0) {
+ /* We need to find the new tail range. */
+ R_TRY(core.OperatePerEntry(tail_range.virtual_offset - 1, 1, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 data_read_size) -> Result {
+ AMS_UNUSED(data_offset, data_read_size);
+
+ /* If we can, use the current entry. */
+ if (head_range.virtual_offset != entry.virt_offset) {
+ tail_range = {
+ .virtual_offset = entry.virt_offset,
+ .virtual_size = virtual_data_size,
+ .physical_size = entry.phys_size,
+ .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
+ };
+
+ new_tail_found = true;
+ }
+
+ /* We only want to determine the new tail range, so we're not continuous. */
+ *out_continuous = false;
+ R_SUCCEED();
+ }));
+ }
+
+ /* If we didn't find a new tail, write a default (and we're done). */
+ if (!new_tail_found) {
+ tail_range = {
+ .virtual_offset = tail_range.virtual_offset,
+ .virtual_size = 0,
+ .physical_size = 0,
+ .is_block_alignment_required = true,
+ };
+ break;
+ }
+ }
+
+ R_SUCCEED();
+ }
+
+ void StoreAssociateBuffer(const fs::IBufferManager::MemoryRange &memory_range, const CacheEntry &entry) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(m_block_cache_manager.GetCount() > 0);
+
+ /* Acquire exclusive access to our manager. */
+ std::scoped_lock lk(m_mutex);
+
+ /* Get empty cache index. */
+ CacheIndex empty_index, lru_index;
+ m_block_cache_manager.GetEmptyCacheEntryIndex(std::addressof(empty_index), std::addressof(lru_index));
+
+ /* If nothing is empty, invalidate the least recently used entry. */
+ if (empty_index == BlockCacheManager::InvalidCacheIndex) {
+ m_block_cache_manager.InvalidateCacheEntry(lru_index);
+ empty_index = lru_index;
+ }
+
+ /* Set the entry. */
+ m_block_cache_manager.SetCacheEntry(empty_index, entry, memory_range);
+ }
+ };
+ private:
+ CompressedStorageCore m_core;
+ CacheManager m_cache_manager;
public:
- CompressedStorage() { /* ... */ }
+ CompressedStorage() = default;
virtual ~CompressedStorage() { this->Finalize(); }
- Result Initialize(MemoryResource *bktr_allocator, IBufferManager *cache_allocator, fs::SubStorage data_storage, fs::SubStorage node_storage, fs::SubStorage entry_storage, s32 bktr_entry_count, size_t block_size_max, size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor, size_t cache_size_0, size_t cache_size_1, s32 max_cache_entries) {
- AMS_UNUSED(bktr_allocator, cache_allocator, data_storage, node_storage, entry_storage, bktr_entry_count, block_size_max, continuous_reading_size_max, get_decompressor, cache_size_0, cache_size_1, max_cache_entries);
- AMS_ABORT("TODO");
+ Result Initialize(MemoryResource *bktr_allocator, fs::IBufferManager *cache_allocator, fs::SubStorage data_storage, fs::SubStorage node_storage, fs::SubStorage entry_storage, s32 bktr_entry_count, size_t block_size_max, size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor, size_t cache_size_0, size_t cache_size_1, s32 max_cache_entries) {
+ /* Initialize our core. */
+ R_TRY(m_core.Initialize(bktr_allocator, data_storage, node_storage, entry_storage, bktr_entry_count, block_size_max, continuous_reading_size_max, get_decompressor));
+
+ /* Get our core size. */
+ s64 core_size = 0;
+ R_TRY(m_core.GetSize(std::addressof(core_size)));
+
+ /* Initialize our cache manager. */
+ R_TRY(m_cache_manager.Initialize(cache_allocator, core_size, cache_size_0, cache_size_1, max_cache_entries));
+
+ R_SUCCEED();
}
void Finalize() {
- AMS_ABORT("TODO");
- /* m_cache_manager.Finalize(); */
- /* m_core.Finalize(); */
+ m_cache_manager.Finalize();
+ m_core.Finalize();
+ }
+
+ fs::IStorage *GetDataStorage() {
+ return m_core.GetDataStorage();
+ }
+
+ Result GetDataStorageSize(s64 *out) {
+ R_RETURN(m_core.GetDataStorageSize(out));
+ }
+
+ Result GetEntryList(Entry *out_entries, s32 *out_read_count, s32 max_entry_count, s64 offset, s64 size) {
+ R_RETURN(m_core.GetEntryList(out_entries, out_read_count, max_entry_count, offset, size));
+ }
+
+ fssystem::BucketTree &GetEntryTable() {
+ return m_core.GetEntryTable();
}
public:
virtual Result QueryAppropriateOffset(s64 *out, s64 offset, s64 access_size, s64 alignment_size) override {
- AMS_ABORT("TODO");
- AMS_UNUSED(out, offset, access_size, alignment_size);
- /* return m_core.QueryAppropriateOffsetForAsynchronousAccess(out, offset, access_size, alignment_size); */
+ R_RETURN(m_core.QueryAppropriateOffsetForAsynchronousAccess(out, offset, access_size, alignment_size));
}
public:
- virtual Result Read(s64 offset, void *buffer, size_t size) override { AMS_UNUSED(offset, buffer, size); AMS_ABORT("TODO"); }
- virtual Result OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) override { AMS_UNUSED(dst, dst_size, op_id, offset, size, src, src_size); AMS_ABORT("TODO"); }
+ virtual Result Read(s64 offset, void *buffer, size_t size) override {
+ R_RETURN(m_cache_manager.Read(m_core, offset, buffer, size));
+ }
+
+ virtual Result OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) override {
+ /* Check pre-conditions. */
+ AMS_ASSERT(offset >= 0);
+ AMS_ASSERT(size >= 0);
+
+ /* Perform the operation. */
+ switch (op_id) {
+ case fs::OperationId::Invalidate:
+ m_cache_manager.Invalidate();
+ R_TRY(m_core.Invalidate());
+ break;
+ case fs::OperationId::QueryRange:
+ R_TRY(m_core.QueryRange(dst, dst_size, offset, size));
+ break;
+ default:
+ R_THROW(fs::ResultUnsupportedOperationInCompressedStorageB());
+ }
+
+ R_SUCCEED();
+ }
virtual Result GetSize(s64 *out) override {
- AMS_ABORT("TODO");
- AMS_UNUSED(out);
- /* return m_core.GetSize(out); */
+ R_RETURN(m_core.GetSize(out));
}
virtual Result Flush() override {
- return ResultSuccess();
+ R_SUCCEED();
}
virtual Result Write(s64 offset, const void *buffer, size_t size) override {
AMS_UNUSED(offset, buffer, size);
- return fs::ResultUnsupportedOperationInCompressedStorageA();
+ R_THROW(fs::ResultUnsupportedOperationInCompressedStorageA());
}
virtual Result SetSize(s64 size) override {
AMS_UNUSED(size);
/* NOTE: Is Nintendo returning the wrong result here? */
- return fs::ResultUnsupportedOperationInIndirectStorageB();
+ R_THROW(fs::ResultUnsupportedOperationInIndirectStorageB());
}
};
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compression_common.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compression_common.hpp
index b7d875a0a..4b22428d8 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compression_common.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_compression_common.hpp
@@ -18,9 +18,9 @@
namespace ams::fssystem {
- enum CompressionType {
+ enum CompressionType : u8 {
CompressionType_None = 0,
- CompressionType_1 = 1,
+ CompressionType_Zeros = 1,
CompressionType_2 = 2,
CompressionType_Lz4 = 3,
CompressionType_Unknown = 4,
@@ -29,14 +29,16 @@ namespace ams::fssystem {
using DecompressorFunction = Result (*)(void *, size_t, const void *, size_t);
using GetDecompressorFunction = DecompressorFunction (*)(CompressionType);
+ constexpr s64 CompressionBlockAlignment = 0x10;
+
namespace CompressionTypeUtility {
constexpr bool IsBlockAlignmentRequired(CompressionType type) {
- return type != CompressionType_None && type != CompressionType_1;
+ return type != CompressionType_None && type != CompressionType_Zeros;
}
constexpr bool IsDataStorageAccessRequired(CompressionType type) {
- return type != CompressionType_1;
+ return type != CompressionType_Zeros;
}
constexpr bool IsRandomAccessible(CompressionType type) {
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp
index 293c91be1..e14dd4658 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp
@@ -133,22 +133,26 @@ namespace ams::fssystem {
virtual Result GetSize(s64 *out) override {
AMS_ASSERT(out != nullptr);
- *out = m_table.GetEnd();
- return ResultSuccess();
+
+ BucketTree::Offsets offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(offsets)));
+
+ *out = offsets.end_offset;
+ R_SUCCEED();
}
virtual Result Flush() override {
- return ResultSuccess();
+ R_SUCCEED();
}
virtual Result Write(s64 offset, const void *buffer, size_t size) override {
AMS_UNUSED(offset, buffer, size);
- return fs::ResultUnsupportedOperationInIndirectStorageA();
+ R_THROW(fs::ResultUnsupportedOperationInIndirectStorageA());
}
virtual Result SetSize(s64 size) override {
AMS_UNUSED(size);
- return fs::ResultUnsupportedOperationInIndirectStorageB();
+ R_THROW(fs::ResultUnsupportedOperationInIndirectStorageB());
}
protected:
BucketTree &GetEntryTable() { return m_table; }
@@ -158,7 +162,7 @@ namespace ams::fssystem {
return m_data_storage[index];
}
- template<typename F>
+ template<bool RangeCheck, typename F>
Result OperatePerEntry(s64 offset, s64 size, F func);
};
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp
index f241c0b57..52243dd82 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp
@@ -18,7 +18,7 @@
namespace ams::fssystem {
- template<typename F>
+ template<bool RangeCheck, typename F>
Result IndirectStorage::OperatePerEntry(s64 offset, s64 size, F func) {
/* Validate preconditions. */
AMS_ASSERT(offset >= 0);
@@ -28,15 +28,19 @@ namespace ams::fssystem {
/* Succeed if there's nothing to operate on. */
R_SUCCEED_IF(size == 0);
+ /* Get the table offsets. */
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
/* Validate arguments. */
- R_UNLESS(m_table.Includes(offset, size), fs::ResultOutOfRange());
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
/* Find the offset in our tree. */
BucketTree::Visitor visitor;
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get()->GetVirtualOffset();
- R_UNLESS(0 <= entry_offset && m_table.Includes(entry_offset), fs::ResultInvalidIndirectEntryOffset());
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), fs::ResultInvalidIndirectEntryOffset());
}
/* Prepare to operate in chunks. */
@@ -67,16 +71,21 @@ namespace ams::fssystem {
/* Ensure that we can process. */
R_UNLESS(cur_entry.storage_index == 0, fs::ResultInvalidIndirectEntryStorageIndex());
- /* Get the current data storage's size. */
- s64 cur_data_storage_size;
- R_TRY(m_data_storage[0].GetSize(std::addressof(cur_data_storage_size)));
/* Ensure that we remain within range. */
const auto data_offset = cur_offset - cur_entry_offset;
const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
const auto cur_size = static_cast(cr_info.GetReadSize());
- R_UNLESS(0 <= cur_entry_phys_offset && cur_entry_phys_offset <= cur_data_storage_size, fs::ResultInvalidIndirectEntryOffset());
- R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size, fs::ResultInvalidIndirectStorageSize());
+
+ /* If we should, verify the range. */
+ if constexpr (RangeCheck) {
+ /* Get the current data storage's size. */
+ s64 cur_data_storage_size;
+ R_TRY(m_data_storage[0].GetSize(std::addressof(cur_data_storage_size)));
+
+ R_UNLESS(0 <= cur_entry_phys_offset && cur_entry_phys_offset <= cur_data_storage_size, fs::ResultInvalidIndirectEntryOffset());
+ R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size, fs::ResultInvalidIndirectStorageSize());
+ }
/* Operate. */
R_TRY(func(std::addressof(m_data_storage[0]), cur_entry_phys_offset + data_offset, cur_offset, cur_size));
@@ -91,20 +100,20 @@ namespace ams::fssystem {
if (visitor.CanMoveNext()) {
R_TRY(visitor.MoveNext());
next_entry_offset = visitor.Get()->GetVirtualOffset();
- R_UNLESS(m_table.Includes(next_entry_offset), fs::ResultInvalidIndirectEntryOffset());
+ R_UNLESS(table_offsets.IsInclude(next_entry_offset), fs::ResultInvalidIndirectEntryOffset());
} else {
- next_entry_offset = m_table.GetEnd();
+ next_entry_offset = table_offsets.end_offset;
}
R_UNLESS(cur_offset < next_entry_offset, fs::ResultInvalidIndirectEntryOffset());
/* Get the offset of the entry in the data we read. */
const auto data_offset = cur_offset - cur_entry_offset;
- const auto data_size = (next_entry_offset - cur_entry_offset) - data_offset;
+ const auto data_size = (next_entry_offset - cur_entry_offset);
AMS_ASSERT(data_size > 0);
/* Determine how much is left. */
const auto remaining_size = end_offset - cur_offset;
- const auto cur_size = std::min(remaining_size, data_size);
+ const auto cur_size = std::min(remaining_size, data_size - data_offset);
AMS_ASSERT(cur_size <= size);
/* Operate, if we need to. */
@@ -116,14 +125,17 @@ namespace ams::fssystem {
}
if (needs_operate) {
- /* Get the current data storage's size. */
- s64 cur_data_storage_size;
- R_TRY(m_data_storage[cur_entry.storage_index].GetSize(std::addressof(cur_data_storage_size)));
-
- /* Ensure that we remain within range. */
const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
- R_UNLESS(0 <= cur_entry_phys_offset && cur_entry_phys_offset <= cur_data_storage_size, fs::ResultIndirectStorageCorrupted());
- R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size, fs::ResultIndirectStorageCorrupted());
+
+ if constexpr (RangeCheck) {
+ /* Get the current data storage's size. */
+ s64 cur_data_storage_size;
+ R_TRY(m_data_storage[cur_entry.storage_index].GetSize(std::addressof(cur_data_storage_size)));
+
+ /* Ensure that we remain within range. */
+ R_UNLESS(0 <= cur_entry_phys_offset && cur_entry_phys_offset <= cur_data_storage_size, fs::ResultIndirectStorageCorrupted());
+ R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size, fs::ResultIndirectStorageCorrupted());
+ }
R_TRY(func(std::addressof(m_data_storage[cur_entry.storage_index]), cur_entry_phys_offset + data_offset, cur_offset, cur_size));
}
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_integrity_romfs_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_integrity_romfs_storage.hpp
index e863776cb..cc888a49c 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_integrity_romfs_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_integrity_romfs_storage.hpp
@@ -37,7 +37,7 @@ namespace ams::fssystem {
IntegrityRomFsStorage() : m_mutex() { /* ... */ }
virtual ~IntegrityRomFsStorage() override { this->Finalize(); }
- Result Initialize(save::HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash, save::HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info, IBufferManager *bm, IHash256GeneratorFactory *hgf);
+ Result Initialize(save::HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash, save::HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info, fs::IBufferManager *bm, IHash256GeneratorFactory *hgf);
void Finalize();
virtual Result Read(s64 offset, void *buffer, size_t size) override {
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_nca_file_system_driver.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_nca_file_system_driver.hpp
index 41469894f..80bf9d442 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_nca_file_system_driver.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_nca_file_system_driver.hpp
@@ -21,7 +21,7 @@
#include
#include
#include
-#include
+#include <stratosphere/fs/fs_i_buffer_manager.hpp>
namespace ams::fssystem {
@@ -228,17 +228,17 @@ namespace ams::fssystem {
 std::shared_ptr<NcaReader> m_original_reader;
 std::shared_ptr<NcaReader> m_reader;
MemoryResource * const m_allocator;
- fssystem::IBufferManager * const m_buffer_manager;
+ fs::IBufferManager * const m_buffer_manager;
fssystem::IHash256GeneratorFactorySelector * const m_hash_generator_factory_selector;
public:
static Result SetupFsHeaderReader(NcaFsHeaderReader *out, const NcaReader &reader, s32 fs_index);
public:
- NcaFileSystemDriver(std::shared_ptr<NcaReader> reader, MemoryResource *allocator, IBufferManager *buffer_manager, IHash256GeneratorFactorySelector *hgf_selector) : m_original_reader(), m_reader(reader), m_allocator(allocator), m_buffer_manager(buffer_manager), m_hash_generator_factory_selector(hgf_selector) {
+ NcaFileSystemDriver(std::shared_ptr<NcaReader> reader, MemoryResource *allocator, fs::IBufferManager *buffer_manager, IHash256GeneratorFactorySelector *hgf_selector) : m_original_reader(), m_reader(reader), m_allocator(allocator), m_buffer_manager(buffer_manager), m_hash_generator_factory_selector(hgf_selector) {
AMS_ASSERT(m_reader != nullptr);
AMS_ASSERT(m_hash_generator_factory_selector != nullptr);
}
- NcaFileSystemDriver(std::shared_ptr<NcaReader> original_reader, std::shared_ptr<NcaReader> reader, MemoryResource *allocator, IBufferManager *buffer_manager, IHash256GeneratorFactorySelector *hgf_selector) : m_original_reader(original_reader), m_reader(reader), m_allocator(allocator), m_buffer_manager(buffer_manager), m_hash_generator_factory_selector(hgf_selector) {
+ NcaFileSystemDriver(std::shared_ptr<NcaReader> original_reader, std::shared_ptr<NcaReader> reader, MemoryResource *allocator, fs::IBufferManager *buffer_manager, IHash256GeneratorFactorySelector *hgf_selector) : m_original_reader(original_reader), m_reader(reader), m_allocator(allocator), m_buffer_manager(buffer_manager), m_hash_generator_factory_selector(hgf_selector) {
AMS_ASSERT(m_reader != nullptr);
AMS_ASSERT(m_hash_generator_factory_selector != nullptr);
}
@@ -278,7 +278,7 @@ namespace ams::fssystem {
Result CreateCompressedStorage(std::shared_ptr *out, std::shared_ptr *out_cmp, std::shared_ptr *out_meta, std::shared_ptr base_storage, const NcaCompressionInfo &compression_info);
public:
- Result CreateCompressedStorage(std::shared_ptr *out, std::shared_ptr *out_cmp, std::shared_ptr *out_meta, std::shared_ptr base_storage, const NcaCompressionInfo &compression_info, GetDecompressorFunction get_decompressor, MemoryResource *allocator, IBufferManager *buffer_manager);
+ Result CreateCompressedStorage(std::shared_ptr *out, std::shared_ptr *out_cmp, std::shared_ptr *out_meta, std::shared_ptr base_storage, const NcaCompressionInfo &compression_info, GetDecompressorFunction get_decompressor, MemoryResource *allocator, fs::IBufferManager *buffer_manager);
};
}
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_sparse_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_sparse_storage.hpp
index 61c7cc0b3..d0d406ce5 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_sparse_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_sparse_storage.hpp
@@ -36,32 +36,32 @@ namespace ams::fssystem {
if (size > 0) {
std::memset(buffer, 0, size);
}
- return ResultSuccess();
+ R_SUCCEED();
}
virtual Result OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) override {
AMS_UNUSED(dst, dst_size, op_id, offset, size, src, src_size);
- return ResultSuccess();
+ R_SUCCEED();
}
virtual Result GetSize(s64 *out) override {
AMS_ASSERT(out != nullptr);
 *out = std::numeric_limits<s64>::max();
- return ResultSuccess();
+ R_SUCCEED();
}
virtual Result Flush() override {
- return ResultSuccess();
+ R_SUCCEED();
}
virtual Result Write(s64 offset, const void *buffer, size_t size) override {
AMS_UNUSED(offset, buffer, size);
- return fs::ResultUnsupportedOperationInZeroStorageA();
+ R_THROW(fs::ResultUnsupportedOperationInZeroStorageA());
}
virtual Result SetSize(s64 size) override {
AMS_UNUSED(size);
- return fs::ResultUnsupportedOperationInZeroStorageB();
+ R_THROW(fs::ResultUnsupportedOperationInZeroStorageB());
}
};
private:
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/impl/fssystem_block_cache_manager.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/impl/fssystem_block_cache_manager.hpp
new file mode 100644
index 000000000..1fcdc5970
--- /dev/null
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/impl/fssystem_block_cache_manager.hpp
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#pragma once
+#include
+#include
+
+namespace ams::fssystem::impl {
+
+ template<typename CacheEntryType, typename AllocatorType>
+ class BlockCacheManager {
+ NON_COPYABLE(BlockCacheManager);
+ NON_MOVEABLE(BlockCacheManager);
+ public:
+ using MemoryRange = AllocatorType::MemoryRange;
+ using CacheIndex = s32;
+
+ using BufferAttribute = AllocatorType::BufferAttribute;
+
+ static constexpr CacheIndex InvalidCacheIndex = -1;
+
+ using CacheEntry = CacheEntryType;
+ static_assert(util::is_pod<CacheEntry>::value);
+ private:
+ AllocatorType *m_allocator = nullptr;
+ std::unique_ptr<CacheEntry[], ::ams::fs::impl::Deleter> m_entries{};
+ s32 m_max_cache_entry_count = 0;
+ public:
+ constexpr BlockCacheManager() = default;
+ public:
+ Result Initialize(AllocatorType *allocator, s32 max_entries) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(m_allocator == nullptr);
+ AMS_ASSERT(m_entries == nullptr);
+ AMS_ASSERT(allocator != nullptr);
+
+ /* Setup our entries buffer, if necessary. */
+ if (max_entries > 0) {
+ /* Create the entries. */
+ m_entries = fs::impl::MakeUnique<CacheEntry[]>(static_cast<size_t>(max_entries));
+ R_UNLESS(m_entries != nullptr, fs::ResultAllocationFailureInMakeUnique());
+
+ /* Clear the entries. */
+ std::memset(m_entries.get(), 0, sizeof(CacheEntry) * max_entries);
+ }
+
+ /* Set fields. */
+ m_allocator = allocator;
+ m_max_cache_entry_count = max_entries;
+
+ R_SUCCEED();
+ }
+
+ void Finalize() {
+ /* Reset all fields. */
+ m_entries.reset(nullptr);
+ m_allocator = nullptr;
+ m_max_cache_entry_count = 0;
+ }
+
+ bool IsInitialized() const {
+ return m_allocator != nullptr;
+ }
+
+ AllocatorType *GetAllocator() { return m_allocator; }
+ s32 GetCount() const { return m_max_cache_entry_count; }
+
+ void AcquireCacheEntry(CacheEntry *out_entry, MemoryRange *out_range, CacheIndex index) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+ AMS_ASSERT(index < this->GetCount());
+
+ /* Get the entry. */
+ auto &entry = m_entries[index];
+
+ /* Set the out range. */
+ if (entry.IsWriteBack()) {
+ *out_range = AllocatorType::MakeMemoryRange(entry.memory_address, entry.memory_size);
+ } else {
+ *out_range = m_allocator->AcquireCache(entry.handle);
+ }
+
+ /* Set the out entry. */
+ *out_entry = entry;
+
+ /* Sanity check. */
+ AMS_ASSERT(out_entry->is_valid);
+ AMS_ASSERT(out_entry->is_cached);
+
+ /* Clear our local entry. */
+ entry.is_valid = false;
+ entry.handle = 0;
+ entry.memory_address = 0;
+ entry.memory_size = 0;
+ entry.lru_counter = 0;
+
+ /* Update the out entry. */
+ out_entry->is_valid = true;
+ out_entry->handle = 0;
+ out_entry->memory_address = 0;
+ out_entry->memory_size = 0;
+ out_entry->lru_counter = 0;
+ }
+
+ bool ExistsRedundantCacheEntry(const CacheEntry &entry) const {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Iterate over all entries, checking if any contain our extents. */
+ for (auto i = 0; i < this->GetCount(); ++i) {
+ if (const auto &cur_entry = m_entries[i]; cur_entry.IsAllocated()) {
+ if (cur_entry.range.offset < entry.range.GetEndOffset() && entry.range.offset < cur_entry.range.GetEndOffset()) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ void GetEmptyCacheEntryIndex(CacheIndex *out_empty, CacheIndex *out_lru) {
+ /* Find empty and lru indices. */
+ CacheIndex empty = InvalidCacheIndex, lru = InvalidCacheIndex;
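+ /* N.B. lru_counter saturates at its maximum; the valid entry with the largest counter is treated as least recently used, since counters are incremented on every scan and newly stored entries start at zero. */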
+ for (auto i = 0; i < this->GetCount(); ++i) {
+ if (auto &entry = m_entries[i]; entry.is_valid) {
+ /* Get/Update the lru counter. */
+ if (entry.lru_counter != std::numeric_limits<decltype(entry.lru_counter)>::max()) {
+ ++entry.lru_counter;
+ }
+
+ /* Update the lru index. */
+ if (lru == InvalidCacheIndex || m_entries[lru].lru_counter < entry.lru_counter) {
+ lru = i;
+ }
+ } else {
+ /* The entry is invalid, so we can update the empty index. */
+ if (empty == InvalidCacheIndex) {
+ empty = i;
+ }
+ }
+ }
+
+ /* Set the output. */
+ *out_empty = empty;
+ *out_lru = lru;
+ }
+
+ void Invalidate() {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Invalidate all entries. */
+ for (auto i = 0; i < this->GetCount(); ++i) {
+ if (m_entries[i].is_valid) {
+ this->InvalidateCacheEntry(i);
+ }
+ }
+ }
+
+ void InvalidateCacheEntry(CacheIndex index) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+ AMS_ASSERT(index < this->GetCount());
+
+ /* Get the entry. */
+ auto &entry = m_entries[index];
+ AMS_ASSERT(entry.is_valid);
+
+ /* If necessary, perform write-back. */
+ if (entry.IsWriteBack()) {
+ AMS_ASSERT(entry.memory_address != 0 && entry.handle == 0);
+ m_allocator->DeallocateBuffer(AllocatorType::MakeMemoryRange(entry.memory_address, entry.memory_size));
+ } else {
+ AMS_ASSERT(entry.memory_address == 0 && entry.handle != 0);
+
+ if (const auto memory_range = m_allocator->AcquireCache(entry.handle); memory_range.first) {
+ m_allocator->DeallocateBuffer(memory_range);
+ }
+ }
+
+ /* Set entry as invalid. */
+ entry.is_valid = false;
+ entry.Invalidate();
+ }
+
+ void RegisterCacheEntry(CacheIndex index, const MemoryRange &memory_range, const BufferAttribute &attribute) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Register the entry. */
+ if (auto &entry = m_entries[index]; entry.IsWriteBack()) {
+ entry.handle = 0;
+ entry.memory_address = memory_range.first;
+ entry.memory_size = memory_range.second;
+ } else {
+ entry.handle = m_allocator->RegisterCache(memory_range, attribute);
+ entry.memory_address = 0;
+ entry.memory_size = 0;
+ }
+ }
+
+ void ReleaseCacheEntry(CacheEntry *entry, const MemoryRange &memory_range) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+
+ /* Release the entry. */
+ m_allocator->DeallocateBuffer(memory_range);
+ entry->is_valid = false;
+ entry->is_cached = false;
+ }
+
+ void ReleaseCacheEntry(CacheIndex index, const MemoryRange &memory_range) {
+ return this->ReleaseCacheEntry(std::addressof(m_entries[index]), memory_range);
+ }
+
+ bool SetCacheEntry(CacheIndex index, const CacheEntry &entry, const MemoryRange &memory_range, const BufferAttribute &attr) {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+ AMS_ASSERT(0 <= index && index < this->GetCount());
+
+ /* Write the entry. */
+ m_entries[index] = entry;
+
+ /* Sanity check. */
+ AMS_ASSERT(entry.is_valid);
+ AMS_ASSERT(entry.is_cached);
+ AMS_ASSERT(entry.handle == 0);
+ AMS_ASSERT(entry.memory_address == 0);
+
+ /* Register or release. */
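+ /* N.B. If another allocated entry already overlaps this range, release the buffer rather than registering it, so the same blocks are never cached twice. */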
+ if (this->ExistsRedundantCacheEntry(entry)) {
+ this->ReleaseCacheEntry(index, memory_range);
+ return false;
+ } else {
+ this->RegisterCacheEntry(index, memory_range, attr);
+ return true;
+ }
+ }
+
+ bool SetCacheEntry(CacheIndex index, const CacheEntry &entry, const MemoryRange &memory_range) {
+ const BufferAttribute attr{};
+ return this->SetCacheEntry(index, entry, memory_range, attr);
+ }
+
+ void SetFlushing(CacheIndex index, bool en) {
+ if constexpr (requires { m_entries[index].is_flushing; }) {
+ m_entries[index].is_flushing = en;
+ }
+ }
+
+ void SetWriteBack(CacheIndex index, bool en) {
+ if constexpr (requires { m_entries[index].is_write_back; }) {
+ m_entries[index].is_write_back = en;
+ }
+ }
+
+ const CacheEntry &operator[](CacheIndex index) const {
+ /* Check pre-conditions. */
+ AMS_ASSERT(this->IsInitialized());
+ AMS_ASSERT(0 <= index && index < this->GetCount());
+
+ return m_entries[index];
+ }
+ };
+
+}
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_block_cache_buffered_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_block_cache_buffered_storage.hpp
index 815c7fd77..fb875c2d1 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_block_cache_buffered_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_block_cache_buffered_storage.hpp
@@ -21,6 +21,7 @@
#include
#include
#include
+#include <stratosphere/fssystem/impl/fssystem_block_cache_manager.hpp>
namespace ams::fssystem::save {
@@ -30,7 +31,7 @@ namespace ams::fssystem::save {
constexpr inline size_t IntegrityLayerCountSaveDataMeta = 4;
struct FileSystemBufferManagerSet {
- IBufferManager *buffers[IntegrityMaxLayerCount];
+ fs::IBufferManager *buffers[IntegrityMaxLayerCount];
};
 static_assert(util::is_pod<FileSystemBufferManagerSet>::value);
@@ -40,51 +41,77 @@ namespace ams::fssystem::save {
public:
static constexpr size_t DefaultMaxCacheEntryCount = 24;
private:
- using MemoryRange = std::pair<uintptr_t, size_t>;
- using CacheIndex = s32;
+ using MemoryRange = fs::IBufferManager::MemoryRange;
+
+ struct AccessRange {
+ s64 offset;
+ size_t size;
+
+ s64 GetEndOffset() const {
+ return this->offset + this->size;
+ }
+
+ bool IsIncluded(s64 ofs) const {
+ return this->offset <= ofs && ofs < this->GetEndOffset();
+ }
+ };
+ static_assert(util::is_pod<AccessRange>::value);
struct CacheEntry {
- size_t size;
+ AccessRange range;
bool is_valid;
bool is_write_back;
bool is_cached;
bool is_flushing;
- s64 offset;
- IBufferManager::CacheHandle handle;
+ u16 lru_counter;
+ fs::IBufferManager::CacheHandle handle;
uintptr_t memory_address;
size_t memory_size;
+
+ void Invalidate() {
+ this->is_write_back = false;
+ this->is_flushing = false;
+ }
+
+ bool IsAllocated() const {
+ return this->is_valid && (this->is_write_back ? this->memory_address != 0 : this->handle != 0);
+ }
+
+ bool IsWriteBack() const {
+ return this->is_write_back;
+ }
};
 static_assert(util::is_pod<CacheEntry>::value);
+ using BlockCacheManager = ::ams::fssystem::impl::BlockCacheManager<CacheEntry, fs::IBufferManager>;
+ using CacheIndex = BlockCacheManager::CacheIndex;
+
enum Flag : s32 {
Flag_KeepBurstMode = (1 << 8),
Flag_RealData = (1 << 10),
};
private:
- IBufferManager *m_buffer_manager;
os::SdkRecursiveMutex *m_mutex;
- std::unique_ptr m_entries;
IStorage *m_data_storage;
Result m_last_result;
s64 m_data_size;
size_t m_verification_block_size;
size_t m_verification_block_shift;
- CacheIndex m_invalidate_index;
- s32 m_max_cache_entry_count;
s32 m_flags;
s32 m_buffer_level;
fs::StorageType m_storage_type;
+ BlockCacheManager m_block_cache_manager;
public:
BlockCacheBufferedStorage();
virtual ~BlockCacheBufferedStorage() override;
- Result Initialize(IBufferManager *bm, os::SdkRecursiveMutex *mtx, IStorage *data, s64 data_size, size_t verif_block_size, s32 max_cache_entries, bool is_real_data, s8 buffer_level, bool is_keep_burst_mode, fs::StorageType storage_type);
+ Result Initialize(fs::IBufferManager *bm, os::SdkRecursiveMutex *mtx, IStorage *data, s64 data_size, size_t verif_block_size, s32 max_cache_entries, bool is_real_data, s8 buffer_level, bool is_keep_burst_mode, fs::StorageType storage_type);
void Finalize();
virtual Result Read(s64 offset, void *buffer, size_t size) override;
virtual Result Write(s64 offset, const void *buffer, size_t size) override;
- virtual Result SetSize(s64 size) override { AMS_UNUSED(size); return fs::ResultUnsupportedOperationInBlockCacheBufferedStorageA(); }
+ virtual Result SetSize(s64) override { R_THROW(fs::ResultUnsupportedOperationInBlockCacheBufferedStorageA()); }
virtual Result GetSize(s64 *out) override;
virtual Result Flush() override;
@@ -119,40 +146,24 @@ namespace ams::fssystem::save {
}
}
private:
- s32 GetMaxCacheEntryCount() const {
- return m_max_cache_entry_count;
- }
-
- Result ClearImpl(s64 offset, s64 size);
- Result ClearSignatureImpl(s64 offset, s64 size);
- Result InvalidateCacheImpl(s64 offset, s64 size);
+ Result FillZeroImpl(s64 offset, s64 size);
+ Result DestroySignatureImpl(s64 offset, s64 size);
+ Result InvalidateImpl();
Result QueryRangeImpl(void *dst, size_t dst_size, s64 offset, s64 size);
- bool ExistsRedundantCacheEntry(const CacheEntry &entry) const;
-
Result GetAssociateBuffer(MemoryRange *out_range, CacheEntry *out_entry, s64 offset, size_t ideal_size, bool is_allocate_for_write);
- void DestroyBuffer(CacheEntry *entry, const MemoryRange &range);
-
- Result StoreAssociateBuffer(CacheIndex *out, const MemoryRange &range, const CacheEntry &entry);
- Result StoreAssociateBuffer(const MemoryRange &range, const CacheEntry &entry) {
- CacheIndex dummy;
- return this->StoreAssociateBuffer(std::addressof(dummy), range, entry);
- }
+ Result StoreOrDestroyBuffer(CacheIndex *out, const MemoryRange &range, CacheEntry *entry);
Result StoreOrDestroyBuffer(const MemoryRange &range, CacheEntry *entry) {
AMS_ASSERT(entry != nullptr);
- ON_RESULT_FAILURE { this->DestroyBuffer(entry, range); };
-
- R_TRY(this->StoreAssociateBuffer(range, *entry));
-
- R_SUCCEED();
+ CacheIndex dummy;
+ R_RETURN(this->StoreOrDestroyBuffer(std::addressof(dummy), range, entry));
}
Result FlushCacheEntry(CacheIndex index, bool invalidate);
Result FlushRangeCacheEntries(s64 offset, s64 size, bool invalidate);
- void InvalidateRangeCacheEntries(s64 offset, s64 size);
Result FlushAllCacheEntries();
Result InvalidateAllCacheEntries();
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_buffered_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_buffered_storage.hpp
index f7367326f..9fdaec99f 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_buffered_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_buffered_storage.hpp
@@ -18,7 +18,7 @@
#include
#include
#include
-#include
+#include
namespace ams::fssystem::save {
@@ -31,7 +31,7 @@ namespace ams::fssystem::save {
class SharedCache;
private:
fs::SubStorage m_base_storage;
- IBufferManager *m_buffer_manager;
+ fs::IBufferManager *m_buffer_manager;
size_t m_block_size;
s64 m_base_storage_size;
 std::unique_ptr<Cache[]> m_caches;
@@ -44,7 +44,7 @@ namespace ams::fssystem::save {
BufferedStorage();
virtual ~BufferedStorage();
- Result Initialize(fs::SubStorage base_storage, IBufferManager *buffer_manager, size_t block_size, s32 buffer_count);
+ Result Initialize(fs::SubStorage base_storage, fs::IBufferManager *buffer_manager, size_t block_size, s32 buffer_count);
void Finalize();
bool IsInitialized() const { return m_caches != nullptr; }
@@ -61,7 +61,7 @@ namespace ams::fssystem::save {
void InvalidateCaches();
- IBufferManager *GetBufferManager() const { return m_buffer_manager; }
+ fs::IBufferManager *GetBufferManager() const { return m_buffer_manager; }
void EnableBulkRead() { m_bulk_read_enabled = true; }
private:
diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_integrity_verification_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_integrity_verification_storage.hpp
index e7b7ced05..b162af5e5 100644
--- a/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_integrity_verification_storage.hpp
+++ b/libraries/libstratosphere/include/stratosphere/fssystem/save/fssystem_integrity_verification_storage.hpp
@@ -43,7 +43,7 @@ namespace ams::fssystem::save {
s64 m_verification_block_order;
s64 m_upper_layer_verification_block_size;
s64 m_upper_layer_verification_block_order;
- IBufferManager *m_buffer_manager;
+ fs::IBufferManager *m_buffer_manager;
fs::HashSalt m_salt;
bool m_is_real_data;
fs::StorageType m_storage_type;
@@ -52,7 +52,7 @@ namespace ams::fssystem::save {
IntegrityVerificationStorage() : m_verification_block_size(0), m_verification_block_order(0), m_upper_layer_verification_block_size(0), m_upper_layer_verification_block_order(0), m_buffer_manager(nullptr) { /* ... */ }
virtual ~IntegrityVerificationStorage() override { this->Finalize(); }
- Result Initialize(fs::SubStorage hs, fs::SubStorage ds, s64 verif_block_size, s64 upper_layer_verif_block_size, IBufferManager *bm, fssystem::IHash256GeneratorFactory *hgf, const fs::HashSalt &salt, bool is_real_data, fs::StorageType storage_type);
+ Result Initialize(fs::SubStorage hs, fs::SubStorage ds, s64 verif_block_size, s64 upper_layer_verif_block_size, fs::IBufferManager *bm, fssystem::IHash256GeneratorFactory *hgf, const fs::HashSalt &salt, bool is_real_data, fs::StorageType storage_type);
void Finalize();
virtual Result Read(s64 offset, void *buffer, size_t size) override;
diff --git a/libraries/libstratosphere/source/fssystem/buffers/fssystem_file_system_buffer_manager.cpp b/libraries/libstratosphere/source/fssystem/buffers/fssystem_file_system_buffer_manager.cpp
index 8333d947a..80ef95134 100644
--- a/libraries/libstratosphere/source/fssystem/buffers/fssystem_file_system_buffer_manager.cpp
+++ b/libraries/libstratosphere/source/fssystem/buffers/fssystem_file_system_buffer_manager.cpp
@@ -239,10 +239,10 @@ namespace ams::fssystem {
return it != m_attr_list.end() ? std::addressof(*it) : nullptr;
}
- const std::pair<uintptr_t, size_t> FileSystemBufferManager::AllocateBufferImpl(size_t size, const BufferAttribute &attr) {
+ const fs::IBufferManager::MemoryRange FileSystemBufferManager::DoAllocateBuffer(size_t size, const BufferAttribute &attr) {
std::scoped_lock lk(m_mutex);
- std::pair<uintptr_t, size_t> range = {};
+ fs::IBufferManager::MemoryRange range = {};
const auto order = m_buddy_heap.GetOrderFromBytes(size);
AMS_ASSERT(order >= 0);
@@ -277,7 +277,7 @@ namespace ams::fssystem {
return range;
}
- void FileSystemBufferManager::DeallocateBufferImpl(uintptr_t address, size_t size) {
+ void FileSystemBufferManager::DoDeallocateBuffer(uintptr_t address, size_t size) {
AMS_ASSERT(util::IsPowerOfTwo(size));
std::scoped_lock lk(m_mutex);
@@ -285,7 +285,7 @@ namespace ams::fssystem {
m_buddy_heap.Free(reinterpret_cast(address), m_buddy_heap.GetOrderFromBytes(size));
}
- FileSystemBufferManager::CacheHandle FileSystemBufferManager::RegisterCacheImpl(uintptr_t address, size_t size, const BufferAttribute &attr) {
+ FileSystemBufferManager::CacheHandle FileSystemBufferManager::DoRegisterCache(uintptr_t address, size_t size, const BufferAttribute &attr) {
std::scoped_lock lk(m_mutex);
CacheHandle handle = 0;
@@ -312,10 +312,10 @@ namespace ams::fssystem {
return handle;
}
- const std::pair<uintptr_t, size_t> FileSystemBufferManager::AcquireCacheImpl(CacheHandle handle) {
+ const fs::IBufferManager::MemoryRange FileSystemBufferManager::DoAcquireCache(CacheHandle handle) {
std::scoped_lock lk(m_mutex);
- std::pair<uintptr_t, size_t> range = {};
+ fs::IBufferManager::MemoryRange range = {};
if (m_cache_handle_table.Unregister(std::addressof(range.first), std::addressof(range.second), handle)) {
const size_t total_allocatable_size = m_buddy_heap.GetTotalFreeSize() + m_cache_handle_table.GetTotalCacheSize();
m_peak_total_allocatable_size = std::min(m_peak_total_allocatable_size, total_allocatable_size);
@@ -327,33 +327,33 @@ namespace ams::fssystem {
return range;
}
- size_t FileSystemBufferManager::GetTotalSizeImpl() const {
+ size_t FileSystemBufferManager::DoGetTotalSize() const {
return m_total_size;
}
- size_t FileSystemBufferManager::GetFreeSizeImpl() const {
+ size_t FileSystemBufferManager::DoGetFreeSize() const {
std::scoped_lock lk(m_mutex);
return m_buddy_heap.GetTotalFreeSize();
}
- size_t FileSystemBufferManager::GetTotalAllocatableSizeImpl() const {
+ size_t FileSystemBufferManager::DoGetTotalAllocatableSize() const {
return this->GetFreeSize() + m_cache_handle_table.GetTotalCacheSize();
}
- size_t FileSystemBufferManager::GetPeakFreeSizeImpl() const {
+ size_t FileSystemBufferManager::DoGetFreeSizePeak() const {
return m_peak_free_size;
}
- size_t FileSystemBufferManager::GetPeakTotalAllocatableSizeImpl() const {
+ size_t FileSystemBufferManager::DoGetTotalAllocatableSizePeak() const {
return m_peak_total_allocatable_size;
}
- size_t FileSystemBufferManager::GetRetriedCountImpl() const {
+ size_t FileSystemBufferManager::DoGetRetriedCount() const {
return m_retried_count;
}
- void FileSystemBufferManager::ClearPeakImpl() {
+ void FileSystemBufferManager::DoClearPeak() {
m_peak_free_size = this->GetFreeSize();
m_retried_count = 0;
}
diff --git a/libraries/libstratosphere/source/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp b/libraries/libstratosphere/source/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
index b595a4e66..d9de29af3 100644
--- a/libraries/libstratosphere/source/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
+++ b/libraries/libstratosphere/source/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
@@ -117,7 +117,11 @@ namespace ams::fssystem {
R_UNLESS(buffer != nullptr, fs::ResultNullptrArgument());
R_UNLESS(util::IsAligned(offset, BlockSize), fs::ResultInvalidOffset());
R_UNLESS(util::IsAligned(size, BlockSize), fs::ResultInvalidSize());
- R_UNLESS(m_table.Includes(offset, size), fs::ResultOutOfRange());
+
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
/* Read the data. */
R_TRY(m_data_storage.Read(offset, buffer, size));
@@ -130,8 +134,8 @@ namespace ams::fssystem {
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get()->GetOffset();
- R_UNLESS(util::IsAligned(entry_offset, BlockSize), fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
- R_UNLESS(0 <= entry_offset && m_table.Includes(entry_offset), fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
+ R_UNLESS(util::IsAligned(entry_offset, BlockSize), fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
}
/* Prepare to read in chunks. */
@@ -152,9 +156,9 @@ namespace ams::fssystem {
if (visitor.CanMoveNext()) {
R_TRY(visitor.MoveNext());
next_entry_offset = visitor.Get()->GetOffset();
- R_UNLESS(m_table.Includes(next_entry_offset), fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
+ R_UNLESS(table_offsets.IsInclude(next_entry_offset), fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
} else {
- next_entry_offset = m_table.GetEnd();
+ next_entry_offset = table_offsets.end_offset;
}
R_UNLESS(util::IsAligned(next_entry_offset, BlockSize), fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
R_UNLESS(cur_offset < next_entry_offset, fs::ResultInvalidAesCtrCounterExtendedEntryOffset());
@@ -192,22 +196,13 @@ namespace ams::fssystem {
case fs::OperationId::Invalidate:
{
/* Validate preconditions. */
- AMS_ASSERT(offset >= 0);
AMS_ASSERT(this->IsInitialized());
- /* Succeed if there's nothing to operate on. */
- R_SUCCEED_IF(size == 0);
-
- /* Validate arguments. */
- R_UNLESS(util::IsAligned(offset, BlockSize), fs::ResultInvalidOffset());
- R_UNLESS(util::IsAligned(size, BlockSize), fs::ResultInvalidSize());
- R_UNLESS(m_table.Includes(offset, size), fs::ResultOutOfRange());
-
/* Invalidate our table's cache. */
R_TRY(m_table.InvalidateCache());
/* Operate on our data storage. */
- R_TRY(m_data_storage.OperateRange(dst, dst_size, op_id, offset, size, src, src_size));
+ R_TRY(m_data_storage.OperateRange(fs::OperationId::Invalidate, 0, std::numeric_limits<s64>::max()));
return ResultSuccess();
}
@@ -230,7 +225,11 @@ namespace ams::fssystem {
/* Validate arguments. */
R_UNLESS(util::IsAligned(offset, BlockSize), fs::ResultInvalidOffset());
R_UNLESS(util::IsAligned(size, BlockSize), fs::ResultInvalidSize());
- R_UNLESS(m_table.Includes(offset, size), fs::ResultOutOfRange());
+
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
/* Operate on our data storage. */
R_TRY(m_data_storage.OperateRange(dst, dst_size, op_id, offset, size, src, src_size));
diff --git a/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp b/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp
index e8a8b449f..37306a6af 100644
--- a/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp
+++ b/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp
@@ -249,9 +249,10 @@ namespace ams::fssystem {
/* Handle any data after the aligned portion. */
if (core_offset_end < offset_end) {
- const auto tail_size = static_cast<size_t>(offset_end - core_offset_end);
+ const auto tail_buffer = static_cast<char *>(buffer) + (core_offset_end - offset);
+ const auto tail_size = static_cast<size_t>(offset_end - core_offset_end);
R_TRY(m_base_storage->Read(core_offset_end, pooled_buffer.GetBuffer(), m_data_align));
- std::memcpy(buffer, pooled_buffer.GetBuffer(), tail_size);
+ std::memcpy(tail_buffer, pooled_buffer.GetBuffer(), tail_size);
}
return ResultSuccess();
diff --git a/libraries/libstratosphere/source/fssystem/fssystem_bucket_tree.cpp b/libraries/libstratosphere/source/fssystem/fssystem_bucket_tree.cpp
index e66083f5f..89d30c78b 100644
--- a/libraries/libstratosphere/source/fssystem/fssystem_bucket_tree.cpp
+++ b/libraries/libstratosphere/source/fssystem/fssystem_bucket_tree.cpp
@@ -154,7 +154,7 @@ namespace ams::fssystem {
/* Allocate node. */
R_UNLESS(m_node_l1.Allocate(allocator, node_size), fs::ResultBufferAllocationFailed());
- auto node_guard = SCOPE_GUARD { m_node_l1.Free(node_size); };
+ ON_RESULT_FAILURE { m_node_l1.Free(node_size); };
/* Read node. */
R_TRY(node_storage.Read(0, m_node_l1.Get(), node_size));
@@ -186,12 +186,13 @@ namespace ams::fssystem {
m_entry_count = entry_count;
m_offset_count = offset_count;
m_entry_set_count = entry_set_count;
- m_start_offset = start_offset;
- m_end_offset = end_offset;
+
+ m_offset_cache.offsets.start_offset = start_offset;
+ m_offset_cache.offsets.end_offset = end_offset;
+ m_offset_cache.is_initialized = true;
/* Cancel guard. */
- node_guard.Cancel();
- return ResultSuccess();
+ R_SUCCEED();
}
void BucketTree::Initialize(size_t node_size, s64 end_offset) {
@@ -201,7 +202,10 @@ namespace ams::fssystem {
AMS_ASSERT(!this->IsInitialized());
m_node_size = node_size;
- m_end_offset = end_offset;
+
+ m_offset_cache.offsets.start_offset = 0;
+ m_offset_cache.offsets.end_offset = end_offset;
+ m_offset_cache.is_initialized = true;
}
void BucketTree::Finalize() {
@@ -214,69 +218,77 @@ namespace ams::fssystem {
m_entry_count = 0;
m_offset_count = 0;
m_entry_set_count = 0;
- m_start_offset = 0;
- m_end_offset = 0;
+
+ m_offset_cache.offsets.start_offset = 0;
+ m_offset_cache.offsets.end_offset = 0;
+ m_offset_cache.is_initialized = false;
}
}
- Result BucketTree::Find(Visitor *visitor, s64 virtual_address) const {
+ Result BucketTree::Find(Visitor *visitor, s64 virtual_address) {
AMS_ASSERT(visitor != nullptr);
AMS_ASSERT(this->IsInitialized());
R_UNLESS(virtual_address >= 0, fs::ResultInvalidOffset());
R_UNLESS(!this->IsEmpty(), fs::ResultOutOfRange());
- R_TRY(visitor->Initialize(this));
+ BucketTree::Offsets offsets;
+ R_TRY(this->GetOffsets(std::addressof(offsets)));
+
+ R_TRY(visitor->Initialize(this, offsets));
return visitor->Find(virtual_address);
}
Result BucketTree::InvalidateCache() {
/* Invalidate the node storage cache. */
- {
- s64 storage_size;
- R_TRY(m_node_storage.GetSize(std::addressof(storage_size)));
- R_TRY(m_node_storage.OperateRange(fs::OperationId::Invalidate, 0, storage_size));
- }
-
- /* Refresh start/end offsets. */
- {
- /* Read node. */
- R_TRY(m_node_storage.Read(0, m_node_l1.Get(), m_node_size));
-
- /* Verify node. */
- R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));
-
- /* Validate offsets. */
- const auto * const node = m_node_l1.Get();
-
- s64 start_offset;
- if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
- start_offset = *node->GetEnd();
- } else {
- start_offset = *node->GetBegin();
- }
- const auto end_offset = node->GetEndOffset();
-
- R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(), fs::ResultInvalidBucketTreeEntryOffset());
- R_UNLESS(start_offset < end_offset, fs::ResultInvalidBucketTreeEntryOffset());
-
- /* Set refreshed offsets. */
- m_start_offset = start_offset;
- m_end_offset = end_offset;
- }
+ R_TRY(m_node_storage.OperateRange(fs::OperationId::Invalidate, 0, std::numeric_limits<s64>::max()));
/* Invalidate the entry storage cache. */
- {
- s64 storage_size;
- R_TRY(m_entry_storage.GetSize(std::addressof(storage_size)));
- R_TRY(m_entry_storage.OperateRange(fs::OperationId::Invalidate, 0, storage_size));
- }
+ R_TRY(m_entry_storage.OperateRange(fs::OperationId::Invalidate, 0, std::numeric_limits<s64>::max()));
- return ResultSuccess();
+ /* Reset our offsets. */
+ m_offset_cache.is_initialized = false;
+
+ R_SUCCEED();
}
- Result BucketTree::Visitor::Initialize(const BucketTree *tree) {
+ Result BucketTree::EnsureOffsetCache() {
+ /* If we already have an offset cache, we're good. */
+ R_SUCCEED_IF(m_offset_cache.is_initialized);
+
+ /* Acquire exclusive right to edit the offset cache. */
+ std::scoped_lock lk(m_offset_cache.mutex);
+
+ /* Check again, to be sure. */
+ R_SUCCEED_IF(m_offset_cache.is_initialized);
+
+ /* Read/verify L1. */
+ R_TRY(m_node_storage.Read(0, m_node_l1.Get(), m_node_size));
+ R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));
+
+ /* Get the node. */
+ auto * const node = m_node_l1.Get();
+
+ s64 start_offset;
+ if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
+ start_offset = *node->GetEnd();
+ } else {
+ start_offset = *node->GetBegin();
+ }
+ const auto end_offset = node->GetEndOffset();
+
+ R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(), fs::ResultInvalidBucketTreeEntryOffset());
+ R_UNLESS(start_offset < end_offset, fs::ResultInvalidBucketTreeEntryOffset());
+
+ m_offset_cache.offsets.start_offset = start_offset;
+ m_offset_cache.offsets.end_offset = end_offset;
+ m_offset_cache.is_initialized = true;
+
+ R_SUCCEED();
+ }
+
+ Result BucketTree::Visitor::Initialize(const BucketTree *tree, const BucketTree::Offsets &offsets) {
AMS_ASSERT(tree != nullptr);
AMS_ASSERT(m_tree == nullptr || m_tree == tree);
@@ -284,7 +296,8 @@ namespace ams::fssystem {
m_entry = tree->GetAllocator()->Allocate(tree->m_entry_size);
R_UNLESS(m_entry != nullptr, fs::ResultBufferAllocationFailed());
- m_tree = tree;
+ m_tree = tree;
+ m_offsets = offsets;
}
return ResultSuccess();
@@ -319,7 +332,7 @@ namespace ams::fssystem {
/* Read the new entry. */
const auto entry_size = m_tree->m_entry_size;
const auto entry_offset = impl::GetBucketTreeEntryOffset(m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
- R_TRY(m_tree->m_entry_storage.Read(entry_offset, std::addressof(m_entry), entry_size));
+ R_TRY(m_tree->m_entry_storage.Read(entry_offset, m_entry, entry_size));
/* Note that we changed index. */
m_entry_index = entry_index;
@@ -357,7 +370,7 @@ namespace ams::fssystem {
/* Read the new entry. */
const auto entry_size = m_tree->m_entry_size;
const auto entry_offset = impl::GetBucketTreeEntryOffset(m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
- R_TRY(m_tree->m_entry_storage.Read(entry_offset, std::addressof(m_entry), entry_size));
+ R_TRY(m_tree->m_entry_storage.Read(entry_offset, m_entry, entry_size));
/* Note that we changed index. */
m_entry_index = entry_index;
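Illustrative sketch (not part of the patch): the bucket tree now derives its start/end offsets lazily and caches them behind a mutex (EnsureOffsetCache above), so callers take a BucketTree::Offsets snapshot instead of reading member offsets directly. A minimal caller, assuming GetOffsets() wraps EnsureOffsetCache() and Offsets::IsInclude() performs the range check as used later in this patch:

    Result ReadRangeInTree(BucketTree &table, s64 offset, s64 size) {
        /* Snapshot the cached offsets; the first call populates the cache under the mutex. */
        BucketTree::Offsets offsets;
        R_TRY(table.GetOffsets(std::addressof(offsets)));

        /* Validate the requested range against the cached extents. */
        R_UNLESS(offsets.IsInclude(offset, size), fs::ResultOutOfRange());

        /* Find() fetches the offsets itself and forwards them to the visitor. */
        BucketTree::Visitor visitor;
        R_TRY(table.Find(std::addressof(visitor), offset));

        R_SUCCEED();
    }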
diff --git a/libraries/libstratosphere/source/fssystem/fssystem_indirect_storage.cpp b/libraries/libstratosphere/source/fssystem/fssystem_indirect_storage.cpp
index fc88f965d..bace3fb33 100644
--- a/libraries/libstratosphere/source/fssystem/fssystem_indirect_storage.cpp
+++ b/libraries/libstratosphere/source/fssystem/fssystem_indirect_storage.cpp
@@ -59,14 +59,17 @@ namespace ams::fssystem {
R_UNLESS(out_entries != nullptr || entry_count == 0, fs::ResultNullptrArgument());
/* Check that our range is valid. */
- R_UNLESS(m_table.Includes(offset, size), fs::ResultOutOfRange());
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
/* Find the offset in our tree. */
BucketTree::Visitor visitor;
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get()->GetVirtualOffset();
- R_UNLESS(0 <= entry_offset && m_table.Includes(entry_offset), fs::ResultInvalidIndirectEntryOffset());
+ R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), fs::ResultInvalidIndirectEntryOffset());
}
/* Prepare to loop over entries. */
@@ -96,7 +99,7 @@ namespace ams::fssystem {
/* Write the output count. */
*out_entry_count = count;
- return ResultSuccess();
+ R_SUCCEED();
}
Result IndirectStorage::Read(s64 offset, void *buffer, size_t size) {
@@ -110,35 +113,28 @@ namespace ams::fssystem {
/* Ensure that we have a buffer to read to. */
R_UNLESS(buffer != nullptr, fs::ResultNullptrArgument());
- R_TRY(this->OperatePerEntry(offset, size, [=](fs::IStorage *storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
+ R_TRY((this->OperatePerEntry(offset, size, [=](fs::IStorage *storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
R_TRY(storage->Read(data_offset, reinterpret_cast<char *>(buffer) + (cur_offset - offset), static_cast<size_t>(cur_size)));
- return ResultSuccess();
- }));
+ R_SUCCEED();
+ })));
- return ResultSuccess();
+ R_SUCCEED();
}
Result IndirectStorage::OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) {
switch (op_id) {
case fs::OperationId::Invalidate:
{
- if (size > 0) {
- /* Validate arguments. */
- R_UNLESS(m_table.Includes(offset, size), fs::ResultOutOfRange());
- if (!m_table.IsEmpty()) {
- /* Invalidate our table's cache. */
- R_TRY(m_table.InvalidateCache());
+ if (!m_table.IsEmpty()) {
+ /* Invalidate our table's cache. */
+ R_TRY(m_table.InvalidateCache());
- /* Operate on our entries. */
- R_TRY(this->OperatePerEntry(offset, size, [=](fs::IStorage *storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
- AMS_UNUSED(cur_offset);
- R_TRY(storage->OperateRange(dst, dst_size, op_id, data_offset, cur_size, src, src_size));
- return ResultSuccess();
- }));
+ /* Invalidate our storages. */
+ for (auto &storage : m_data_storage) {
+ R_TRY(storage.OperateRange(fs::OperationId::Invalidate, 0, std::numeric_limits<s64>::max()));
}
- return ResultSuccess();
}
- return ResultSuccess();
+ R_SUCCEED();
}
case fs::OperationId::QueryRange:
{
@@ -148,33 +144,37 @@ namespace ams::fssystem {
if (size > 0) {
/* Validate arguments. */
- R_UNLESS(m_table.Includes(offset, size), fs::ResultOutOfRange());
+ BucketTree::Offsets table_offsets;
+ R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
+
if (!m_table.IsEmpty()) {
/* Create a new info. */
fs::QueryRangeInfo merged_info;
merged_info.Clear();
/* Operate on our entries. */
- R_TRY(this->OperatePerEntry(offset, size, [=, &merged_info](fs::IStorage *storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
+ R_TRY((this->OperatePerEntry(offset, size, [=, &merged_info](fs::IStorage *storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
AMS_UNUSED(cur_offset);
fs::QueryRangeInfo cur_info;
R_TRY(storage->OperateRange(std::addressof(cur_info), sizeof(cur_info), op_id, data_offset, cur_size, src, src_size));
merged_info.Merge(cur_info);
- return ResultSuccess();
- }));
+ R_SUCCEED();
+ })));
/* Write the merged info. */
*reinterpret_cast<fs::QueryRangeInfo *>(dst) = merged_info;
}
}
- return ResultSuccess();
+ R_SUCCEED();
}
default:
return fs::ResultUnsupportedOperationInIndirectStorageC();
}
- return ResultSuccess();
+ R_SUCCEED();
}
}
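Illustrative note (not part of the patch): the OperatePerEntry calls above are now wrapped in an extra pair of parentheses inside R_TRY. A function-like macro splits its argument list on any comma that is not nested in parentheses, so an expression containing such a comma (an explicit template argument list is the usual culprit) has to be parenthesized to reach the macro as a single argument. A minimal, self-contained illustration using a hypothetical macro:

    /* Hypothetical stand-in for a result-propagating macro. */
    #define RETURN_IF_NONZERO(expr) do { const int _rc = (expr); if (_rc != 0) { return _rc; } } while (0)

    template<int A, int B>
    constexpr int Sum() { return A + B; }

    int Example() {
        /* RETURN_IF_NONZERO(Sum<1, 2>());     would not compile: the preprocessor sees two macro arguments. */
        RETURN_IF_NONZERO((Sum<1, 2>()));   /* OK: the extra parentheses keep the call a single argument.    */
        return 0;
    }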
diff --git a/libraries/libstratosphere/source/fssystem/fssystem_integrity_romfs_storage.cpp b/libraries/libstratosphere/source/fssystem/fssystem_integrity_romfs_storage.cpp
index edc5e3ac9..4354bf149 100644
--- a/libraries/libstratosphere/source/fssystem/fssystem_integrity_romfs_storage.cpp
+++ b/libraries/libstratosphere/source/fssystem/fssystem_integrity_romfs_storage.cpp
@@ -17,7 +17,7 @@
namespace ams::fssystem {
- Result IntegrityRomFsStorage::Initialize(save::HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash, save::HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info, IBufferManager *bm, IHash256GeneratorFactory *hgf) {
+ Result IntegrityRomFsStorage::Initialize(save::HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash, save::HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info, fs::IBufferManager *bm, IHash256GeneratorFactory *hgf) {
/* Validate preconditions. */
AMS_ASSERT(bm != nullptr);
diff --git a/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp b/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp
index 5a3eb5932..36305c894 100644
--- a/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp
+++ b/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp
@@ -1105,7 +1105,7 @@ namespace ams::fssystem {
return this->CreateCompressedStorage(out, out_cmp, out_meta, std::move(base_storage), compression_info, m_reader->GetDecompressor(), m_allocator, m_buffer_manager);
}
- Result NcaFileSystemDriver::CreateCompressedStorage(std::shared_ptr *out, std::shared_ptr *out_cmp, std::shared_ptr *out_meta, std::shared_ptr base_storage, const NcaCompressionInfo &compression_info, GetDecompressorFunction get_decompressor, MemoryResource *allocator, IBufferManager *buffer_manager) {
+ Result NcaFileSystemDriver::CreateCompressedStorage(std::shared_ptr *out, std::shared_ptr *out_cmp, std::shared_ptr *out_meta, std::shared_ptr base_storage, const NcaCompressionInfo &compression_info, GetDecompressorFunction get_decompressor, MemoryResource *allocator, fs::IBufferManager *buffer_manager) {
/* Check pre-conditions. */
AMS_ASSERT(out != nullptr);
AMS_ASSERT(base_storage != nullptr);
diff --git a/libraries/libstratosphere/source/fssystem/fssystem_sparse_storage.cpp b/libraries/libstratosphere/source/fssystem/fssystem_sparse_storage.cpp
index 0a9ba3e15..482b56531 100644
--- a/libraries/libstratosphere/source/fssystem/fssystem_sparse_storage.cpp
+++ b/libraries/libstratosphere/source/fssystem/fssystem_sparse_storage.cpp
@@ -29,13 +29,17 @@ namespace ams::fssystem {
R_UNLESS(buffer != nullptr, fs::ResultNullptrArgument());
if (this->GetEntryTable().IsEmpty()) {
- R_UNLESS(this->GetEntryTable().Includes(offset, size), fs::ResultOutOfRange());
+ BucketTree::Offsets table_offsets;
+ R_TRY(this->GetEntryTable().GetOffsets(std::addressof(table_offsets)));
+
+ R_UNLESS(table_offsets.IsInclude(offset, size), fs::ResultOutOfRange());
+
std::memset(buffer, 0, size);
} else {
- R_TRY(this->OperatePerEntry(offset, size, [=](fs::IStorage *storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
+ R_TRY((this->OperatePerEntry(offset, size, [=](fs::IStorage *storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
R_TRY(storage->Read(data_offset, reinterpret_cast<char *>(buffer) + (cur_offset - offset), static_cast<size_t>(cur_size)));
return ResultSuccess();
- }));
+ })));
}
return ResultSuccess();
diff --git a/libraries/libstratosphere/source/fssystem/save/fssystem_block_cache_buffered_storage.cpp b/libraries/libstratosphere/source/fssystem/save/fssystem_block_cache_buffered_storage.cpp
index 5462357c9..bb79b54da 100644
--- a/libraries/libstratosphere/source/fssystem/save/fssystem_block_cache_buffered_storage.cpp
+++ b/libraries/libstratosphere/source/fssystem/save/fssystem_block_cache_buffered_storage.cpp
@@ -17,8 +17,7 @@
namespace ams::fssystem::save {
- BlockCacheBufferedStorage::BlockCacheBufferedStorage()
- : m_buffer_manager(), m_mutex(), m_entries(), m_data_storage(), m_last_result(ResultSuccess()), m_data_size(), m_verification_block_size(), m_verification_block_shift(), m_invalidate_index(), m_max_cache_entry_count(), m_flags(), m_buffer_level(-1)
+ BlockCacheBufferedStorage::BlockCacheBufferedStorage() : m_mutex(), m_data_storage(), m_last_result(ResultSuccess()), m_data_size(), m_verification_block_size(), m_verification_block_shift(), m_flags(), m_buffer_level(-1), m_block_cache_manager()
{
/* ... */
}
@@ -27,40 +26,31 @@ namespace ams::fssystem::save {
this->Finalize();
}
- Result BlockCacheBufferedStorage::Initialize(IBufferManager *bm, os::SdkRecursiveMutex *mtx, IStorage *data, s64 data_size, size_t verif_block_size, s32 max_cache_entries, bool is_real_data, s8 buffer_level, bool is_keep_burst_mode, fs::StorageType storage_type) {
+ Result BlockCacheBufferedStorage::Initialize(fs::IBufferManager *bm, os::SdkRecursiveMutex *mtx, IStorage *data, s64 data_size, size_t verif_block_size, s32 max_cache_entries, bool is_real_data, s8 buffer_level, bool is_keep_burst_mode, fs::StorageType storage_type) {
/* Validate preconditions. */
AMS_ASSERT(data != nullptr);
AMS_ASSERT(bm != nullptr);
AMS_ASSERT(mtx != nullptr);
- AMS_ASSERT(m_buffer_manager == nullptr);
AMS_ASSERT(m_mutex == nullptr);
AMS_ASSERT(m_data_storage == nullptr);
- AMS_ASSERT(m_entries == nullptr);
AMS_ASSERT(max_cache_entries > 0);
- /* Create the entry. */
- m_entries = fs::impl::MakeUnique<CacheEntry[]>(static_cast<size_t>(max_cache_entries));
- R_UNLESS(m_entries != nullptr, fs::ResultAllocationFailureInBlockCacheBufferedStorageA());
+ /* Initialize our manager. */
+ R_TRY(m_block_cache_manager.Initialize(bm, max_cache_entries));
/* Set members. */
- m_buffer_manager = bm;
m_mutex = mtx;
m_data_storage = data;
m_data_size = data_size;
m_verification_block_size = verif_block_size;
m_last_result = ResultSuccess();
- m_invalidate_index = 0;
- m_max_cache_entry_count = max_cache_entries;
m_flags = 0;
m_buffer_level = buffer_level;
m_storage_type = storage_type;
/* Calculate block shift. */
m_verification_block_shift = ILog2(static_cast<u32>(verif_block_size));
- AMS_ASSERT(static_cast<size_t>(1ull << m_verification_block_shift) == m_verification_block_size);
-
- /* Clear the entry. */
- std::memset(m_entries.get(), 0, sizeof(CacheEntry) * m_max_cache_entry_count);
+ AMS_ASSERT(static_cast<size_t>(UINT64_C(1) << m_verification_block_shift) == m_verification_block_size);
/* Set burst mode. */
this->SetKeepBurstMode(is_keep_burst_mode);
@@ -68,32 +58,30 @@ namespace ams::fssystem::save {
/* Set real data cache. */
this->SetRealDataCache(is_real_data);
- return ResultSuccess();
+ R_SUCCEED();
}
void BlockCacheBufferedStorage::Finalize() {
- if (m_entries != nullptr) {
+ if (m_block_cache_manager.IsInitialized()) {
/* Invalidate all cache entries. */
this->InvalidateAllCacheEntries();
+ /* Finalize our block cache manager. */
+ m_block_cache_manager.Finalize();
+
/* Clear members. */
- m_buffer_manager = nullptr;
m_mutex = nullptr;
m_data_storage = nullptr;
m_data_size = 0;
m_verification_block_size = 0;
m_verification_block_shift = 0;
- m_invalidate_index = 0;
- m_max_cache_entry_count = 0;
-
- m_entries.reset();
}
}
Result BlockCacheBufferedStorage::Read(s64 offset, void *buffer, size_t size) {
/* Validate pre-conditions. */
AMS_ASSERT(m_data_storage != nullptr);
- AMS_ASSERT(m_buffer_manager != nullptr);
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
/* Ensure we aren't already in a failed state. */
R_TRY(m_last_result);
@@ -138,7 +126,7 @@ namespace ams::fssystem::save {
R_SUCCEED_IF(aligned_offset >= aligned_offset_end);
/* Ensure we destroy the head buffer. */
- auto head_guard = SCOPE_GUARD { this->DestroyBuffer(std::addressof(head_entry), head_range); };
+ auto head_guard = SCOPE_GUARD { m_block_cache_manager.ReleaseCacheEntry(std::addressof(head_entry), head_range); };
/* Read the tail cache. */
CacheEntry tail_entry = {};
@@ -150,7 +138,7 @@ namespace ams::fssystem::save {
R_SUCCEED_IF(aligned_offset >= aligned_offset_end);
/* Ensure that we destroy the tail buffer. */
- auto tail_guard = SCOPE_GUARD { this->DestroyBuffer(std::addressof(tail_entry), tail_range); };
+ auto tail_guard = SCOPE_GUARD { m_block_cache_manager.ReleaseCacheEntry(std::addressof(tail_entry), tail_range); };
/* Try to do a bulk read. */
if (bulk_read_enabled) {
@@ -165,7 +153,7 @@ namespace ams::fssystem::save {
} R_END_TRY_CATCH;
/* We successfully did a bulk read, so we're done. */
- return ResultSuccess();
+ R_SUCCEED();
} while (0);
}
}
@@ -198,27 +186,27 @@ namespace ams::fssystem::save {
/* Determine where to read data into, and ensure that our entry is aligned. */
char *src = reinterpret_cast<char *>(range.first);
- AMS_ASSERT(util::IsAligned(entry.size, block_alignment));
+ AMS_ASSERT(util::IsAligned(entry.range.size, block_alignment));
/* If the entry isn't cached, read the data. */
if (!entry.is_cached) {
- if (Result result = m_data_storage->Read(entry.offset, src, entry.size); R_FAILED(result)) {
- this->DestroyBuffer(std::addressof(entry), range);
+ if (const Result result = m_data_storage->Read(entry.range.offset, src, entry.range.size); R_FAILED(result)) {
+ m_block_cache_manager.ReleaseCacheEntry(std::addressof(entry), range);
return this->UpdateLastResult(result);
}
entry.is_cached = true;
}
/* Validate the entry extents. */
- AMS_ASSERT(static_cast<s64>(entry.offset) <= aligned_offset);
- AMS_ASSERT(aligned_offset < static_cast<s64>(entry.offset + entry.size));
+ AMS_ASSERT(static_cast<s64>(entry.range.offset) <= aligned_offset);
+ AMS_ASSERT(aligned_offset < entry.range.GetEndOffset());
AMS_ASSERT(aligned_offset <= read_offset);
/* Copy the data. */
{
/* Determine where and how much to copy. */
- const s64 buffer_offset = read_offset - entry.offset;
- const size_t copy_size = std::min(read_size, static_cast<size_t>(entry.offset + entry.size - read_offset));
+ const s64 buffer_offset = read_offset - entry.range.offset;
+ const size_t copy_size = std::min(read_size, static_cast<size_t>(entry.range.GetEndOffset() - read_offset));
/* Actually copy the data. */
std::memcpy(dst, src + buffer_offset, copy_size);
@@ -231,20 +219,20 @@ namespace ams::fssystem::save {
/* Release the cache entry. */
R_TRY(this->UpdateLastResult(this->StoreOrDestroyBuffer(range, std::addressof(entry))));
- aligned_offset = entry.offset + entry.size;
+ aligned_offset = entry.range.GetEndOffset();
}
}
/* Ensure that we read all the data. */
AMS_ASSERT(read_size == 0);
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::Write(s64 offset, const void *buffer, size_t size) {
/* Validate pre-conditions. */
AMS_ASSERT(m_data_storage != nullptr);
- AMS_ASSERT(m_buffer_manager != nullptr);
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
/* Ensure we aren't already in a failed state. */
R_TRY(m_last_result);
@@ -303,24 +291,24 @@ namespace ams::fssystem::save {
char *dst = reinterpret_cast<char *>(range.first);
/* If the entry isn't cached and we're writing a partial entry, read in the entry. */
- if (!entry.is_cached && ((offset != entry.offset) || (offset + size < entry.offset + entry.size))) {
- if (Result result = m_data_storage->Read(entry.offset, dst, entry.size); R_FAILED(result)) {
- this->DestroyBuffer(std::addressof(entry), range);
+ if (!entry.is_cached && ((offset != entry.range.offset) || (offset + size < static_cast<s64>(entry.range.GetEndOffset())))) {
+ if (Result result = m_data_storage->Read(entry.range.offset, dst, entry.range.size); R_FAILED(result)) {
+ m_block_cache_manager.ReleaseCacheEntry(std::addressof(entry), range);
return this->UpdateLastResult(result);
}
}
entry.is_cached = true;
/* Validate the entry extents. */
- AMS_ASSERT(static_cast<s64>(entry.offset) <= aligned_offset);
- AMS_ASSERT(aligned_offset < static_cast<s64>(entry.offset + entry.size));
+ AMS_ASSERT(static_cast<s64>(entry.range.offset) <= aligned_offset);
+ AMS_ASSERT(aligned_offset < entry.range.GetEndOffset());
AMS_ASSERT(aligned_offset <= offset);
/* Copy the data. */
{
/* Determine where and how much to copy. */
- const s64 buffer_offset = offset - entry.offset;
- const size_t copy_size = std::min(size, static_cast<size_t>(entry.offset + entry.size - offset));
+ const s64 buffer_offset = offset - entry.range.offset;
+ const size_t copy_size = std::min(size, static_cast<size_t>(entry.range.GetEndOffset() - offset));
/* Actually copy the data. */
std::memcpy(dst + buffer_offset, src, copy_size);
@@ -339,10 +327,10 @@ namespace ams::fssystem::save {
/* Store the associated buffer. */
CacheIndex index;
- R_TRY(this->UpdateLastResult(this->StoreAssociateBuffer(std::addressof(index), range, entry)));
+ R_TRY(this->UpdateLastResult(this->StoreOrDestroyBuffer(std::addressof(index), range, std::addressof(entry))));
/* Set the after aligned offset. */
- aligned_offset = entry.offset + entry.size;
+ aligned_offset = entry.range.GetEndOffset();
/* If we need to, flush the cache entry. */
if (index >= 0 && IsEnabledKeepBurstMode() && offset == aligned_offset && (block_alignment * 2 <= size)) {
@@ -355,7 +343,7 @@ namespace ams::fssystem::save {
/* Ensure that we didn't end up in a failure state. */
R_TRY(m_last_result);
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::GetSize(s64 *out) {
@@ -365,13 +353,13 @@ namespace ams::fssystem::save {
/* Set the size. */
*out = m_data_size;
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::Flush() {
/* Validate pre-conditions. */
AMS_ASSERT(m_data_storage != nullptr);
- AMS_ASSERT(m_buffer_manager != nullptr);
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
/* Ensure we aren't already in a failed state. */
R_TRY(m_last_result);
@@ -385,7 +373,7 @@ namespace ams::fssystem::save {
/* Set blocking buffer manager allocations. */
buffers::EnableBlockingBufferManagerAllocation();
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) {
@@ -397,34 +385,34 @@ namespace ams::fssystem::save {
switch (op_id) {
case fs::OperationId::FillZero:
{
- R_TRY(this->ClearImpl(offset, size));
- return ResultSuccess();
+ R_TRY(this->FillZeroImpl(offset, size));
+ R_SUCCEED();
}
case fs::OperationId::DestroySignature:
{
- R_TRY(this->ClearSignatureImpl(offset, size));
- return ResultSuccess();
+ R_TRY(this->DestroySignatureImpl(offset, size));
+ R_SUCCEED();
}
case fs::OperationId::Invalidate:
{
R_UNLESS(m_storage_type != fs::StorageType_SaveData, fs::ResultUnsupportedOperationInBlockCacheBufferedStorageB());
- R_TRY(this->InvalidateCacheImpl(offset, size));
- return ResultSuccess();
+ R_TRY(this->InvalidateImpl());
+ R_SUCCEED();
}
case fs::OperationId::QueryRange:
{
R_TRY(this->QueryRangeImpl(dst, dst_size, offset, size));
- return ResultSuccess();
+ R_SUCCEED();
}
default:
- return fs::ResultUnsupportedOperationInBlockCacheBufferedStorageC();
+ R_THROW(fs::ResultUnsupportedOperationInBlockCacheBufferedStorageC());
}
}
Result BlockCacheBufferedStorage::Commit() {
/* Validate pre-conditions. */
AMS_ASSERT(m_data_storage != nullptr);
- AMS_ASSERT(m_buffer_manager != nullptr);
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
/* Ensure we aren't already in a failed state. */
R_TRY(m_last_result);
@@ -432,47 +420,34 @@ namespace ams::fssystem::save {
/* Flush all cache entries. */
R_TRY(this->UpdateLastResult(this->FlushAllCacheEntries()));
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::OnRollback() {
/* Validate pre-conditions. */
- AMS_ASSERT(m_buffer_manager != nullptr);
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
/* Ensure we aren't already in a failed state. */
R_TRY(m_last_result);
/* Release all valid entries back to the buffer manager. */
- const auto max_cache_entry_count = this->GetMaxCacheEntryCount();
- for (s32 i = 0; i < max_cache_entry_count; i++) {
- const auto &entry = m_entries[i];
- if (entry.is_valid) {
- if (entry.is_write_back) {
- AMS_ASSERT(entry.memory_address != 0 && entry.handle == 0);
- m_buffer_manager->DeallocateBuffer(entry.memory_address, entry.memory_size);
- } else {
- AMS_ASSERT(entry.memory_address == 0 && entry.handle != 0);
- const auto memory_range = m_buffer_manager->AcquireCache(entry.handle);
- if (memory_range.first != 0) {
- m_buffer_manager->DeallocateBuffer(memory_range.first, memory_range.second);
- }
- }
+ const auto max_cache_entry_count = m_block_cache_manager.GetCount();
+ for (auto index = 0; index < max_cache_entry_count; index++) {
+ if (const auto &entry = m_block_cache_manager[index]; entry.is_valid) {
+ m_block_cache_manager.InvalidateCacheEntry(index);
}
}
- /* Clear all entries. */
- std::memset(m_entries.get(), 0, sizeof(CacheEntry) * max_cache_entry_count);
-
- return ResultSuccess();
+ R_SUCCEED();
}
- Result BlockCacheBufferedStorage::ClearImpl(s64 offset, s64 size) {
+ Result BlockCacheBufferedStorage::FillZeroImpl(s64 offset, s64 size) {
/* Ensure we aren't already in a failed state. */
R_TRY(m_last_result);
/* Get our storage size. */
s64 storage_size = 0;
- R_TRY(this->GetSize(std::addressof(storage_size)));
+ R_TRY(this->UpdateLastResult(m_data_storage->GetSize(std::addressof(storage_size))));
/* Check the access range. */
R_UNLESS(0 <= offset && offset < storage_size, fs::ResultInvalidOffset());
@@ -542,16 +517,16 @@ namespace ams::fssystem::save {
/* Set blocking buffer manager allocations. */
buffers::EnableBlockingBufferManagerAllocation();
- return ResultSuccess();
+ R_SUCCEED();
}
- Result BlockCacheBufferedStorage::ClearSignatureImpl(s64 offset, s64 size) {
+ Result BlockCacheBufferedStorage::DestroySignatureImpl(s64 offset, s64 size) {
/* Ensure we aren't already in a failed state. */
R_TRY(m_last_result);
/* Get our storage size. */
s64 storage_size = 0;
- R_TRY(this->GetSize(std::addressof(storage_size)));
+ R_TRY(this->UpdateLastResult(m_data_storage->GetSize(std::addressof(storage_size))));
/* Check the access range. */
R_UNLESS(0 <= offset && offset < storage_size, fs::ResultInvalidOffset());
@@ -569,27 +544,20 @@ namespace ams::fssystem::save {
/* Set blocking buffer manager allocations. */
buffers::EnableBlockingBufferManagerAllocation();
- return ResultSuccess();
+ R_SUCCEED();
}
- Result BlockCacheBufferedStorage::InvalidateCacheImpl(s64 offset, s64 size) {
- /* Invalidate the entries corresponding to the range. */
- /* NOTE: Nintendo does not check the result of this invalidation. */
- this->InvalidateRangeCacheEntries(offset, size);
+ Result BlockCacheBufferedStorage::InvalidateImpl() {
+ /* Invalidate cache entries. */
+ {
+ std::scoped_lock lk(*m_mutex);
- /* Get our storage size. */
- s64 storage_size = 0;
- R_TRY(this->GetSize(std::addressof(storage_size)));
-
- /* Determine the extents we can actually query. */
- const auto actual_size = std::min(size, storage_size - offset);
- const auto aligned_offset = util::AlignDown(offset, m_verification_block_size);
- const auto aligned_offset_end = util::AlignUp(offset + actual_size, m_verification_block_size);
- const auto aligned_size = aligned_offset_end - aligned_offset;
+ m_block_cache_manager.Invalidate();
+ }
/* Invalidate the aligned range. */
{
- Result result = m_data_storage->OperateRange(fs::OperationId::Invalidate, aligned_offset, aligned_size);
+ Result result = m_data_storage->OperateRange(fs::OperationId::Invalidate, 0, std::numeric_limits<s64>::max());
AMS_ASSERT(!fs::ResultBufferAllocationFailed::Includes(result));
R_TRY(result);
}
@@ -599,7 +567,7 @@ namespace ams::fssystem::save {
m_last_result = ResultSuccess();
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::QueryRangeImpl(void *dst, size_t dst_size, s64 offset, s64 size) {
@@ -616,29 +584,7 @@ namespace ams::fssystem::save {
/* Query the aligned range. */
R_TRY(this->UpdateLastResult(m_data_storage->OperateRange(dst, dst_size, fs::OperationId::QueryRange, aligned_offset, aligned_size, nullptr, 0)));
- return ResultSuccess();
- }
-
- bool BlockCacheBufferedStorage::ExistsRedundantCacheEntry(const CacheEntry &entry) const {
- /* Get the entry's extents. */
- const s64 offset = entry.offset;
- const size_t size = entry.size;
-
- /* Lock our mutex. */
- std::scoped_lock lk(*m_mutex);
-
- /* Iterate over all entries, checking if any overlap our extents. */
- const auto max_cache_entry_count = this->GetMaxCacheEntryCount();
- for (auto i = 0; i < max_cache_entry_count; ++i) {
- const auto &entry = m_entries[i];
- if (entry.is_valid && (entry.is_write_back ? entry.memory_address != 0 : entry.handle != 0)) {
- if (entry.offset < static_cast<s64>(offset + size) && offset < static_cast<s64>(entry.offset + entry.size)) {
- return true;
- }
- }
- }
-
- return false;
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::GetAssociateBuffer(MemoryRange *out_range, CacheEntry *out_entry, s64 offset, size_t ideal_size, bool is_allocate_for_write) {
@@ -646,7 +592,7 @@ namespace ams::fssystem::save {
/* Validate pre-conditions. */
AMS_ASSERT(m_data_storage != nullptr);
- AMS_ASSERT(m_buffer_manager != nullptr);
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
AMS_ASSERT(out_range != nullptr);
AMS_ASSERT(out_entry != nullptr);
@@ -654,21 +600,19 @@ namespace ams::fssystem::save {
std::scoped_lock lk(*m_mutex);
/* Get the maximum cache entry count. */
- const CacheIndex max_cache_entry_count = static_cast<CacheIndex>(this->GetMaxCacheEntryCount());
+ const CacheIndex max_cache_entry_count = m_block_cache_manager.GetCount();
/* Locate the index of the cache entry, if present. */
CacheIndex index;
size_t actual_size = ideal_size;
for (index = 0; index < max_cache_entry_count; ++index) {
- const auto &entry = m_entries[index];
- if (entry.is_valid && (entry.is_write_back ? entry.memory_address != 0 : entry.handle != 0)) {
- const s64 entry_offset = entry.offset;
- if (entry_offset <= offset && offset < static_cast<s64>(entry_offset + entry.size)) {
+ if (const auto &entry = m_block_cache_manager[index]; entry.IsAllocated()) {
+ if (entry.range.IsIncluded(offset)) {
break;
}
- if (offset <= entry_offset && entry_offset < static_cast<s64>(offset + actual_size)) {
- actual_size = static_cast<size_t>(entry_offset - offset);
+ if (offset <= entry.range.offset && entry.range.offset < static_cast<s64>(offset + actual_size)) {
+ actual_size = static_cast<size_t>(entry.range.offset - offset);
}
}
}
@@ -679,38 +623,16 @@ namespace ams::fssystem::save {
/* If we located an entry, use it. */
if (index != max_cache_entry_count) {
- auto &entry = m_entries[index];
+ m_block_cache_manager.AcquireCacheEntry(out_entry, out_range, index);
- /* Get the range of the found entry. */
- if (entry.is_write_back) {
- *out_range = std::make_pair(entry.memory_address, entry.memory_size);
- } else {
- *out_range = m_buffer_manager->AcquireCache(entry.handle);
- }
-
- /* Get the found entry. */
- *out_entry = entry;
- AMS_ASSERT(out_entry->is_valid);
- AMS_ASSERT(out_entry->is_cached);
-
- /* Clear the entry in the cache. */
- entry.is_valid = false;
- entry.handle = 0;
- entry.memory_address = 0;
- entry.memory_size = 0;
-
- /* Set the output entry. */
- out_entry->is_valid = true;
- out_entry->handle = 0;
- out_entry->memory_address = 0;
- out_entry->memory_size = 0;
+ actual_size = out_entry->range.size - (offset - out_entry->range.offset);
}
/* If we don't have an out entry, allocate one. */
if (out_range->first == 0) {
/* Ensure that the allocatable size is above a threshold. */
- const auto size_threshold = m_buffer_manager->GetTotalSize() / 8;
- if (m_buffer_manager->GetTotalAllocatableSize() < size_threshold) {
+ const auto size_threshold = m_block_cache_manager.GetAllocator()->GetTotalSize() / 8;
+ if (m_block_cache_manager.GetAllocator()->GetTotalAllocatableSize() < size_threshold) {
R_TRY(this->FlushAllCacheEntries());
}
@@ -725,7 +647,7 @@ namespace ams::fssystem::save {
AMS_ASSERT(actual_size >= block_alignment);
/* Allocate a buffer. */
- R_TRY(buffers::AllocateBufferUsingBufferManagerContext(out_range, m_buffer_manager, actual_size, IBufferManager::BufferAttribute(m_buffer_level), [=](const MemoryRange &buffer) {
+ R_TRY(buffers::AllocateBufferUsingBufferManagerContext(out_range, m_block_cache_manager.GetAllocator(), actual_size, fs::IBufferManager::BufferAttribute(m_buffer_level), [=](const MemoryRange &buffer) {
return buffer.first != 0 && block_alignment <= buffer.second;
}, AMS_CURRENT_FUNCTION_NAME));
@@ -737,163 +659,106 @@ namespace ams::fssystem::save {
out_entry->is_write_back = false;
out_entry->is_cached = false;
out_entry->is_flushing = false;
- out_entry->handle = false;
+ out_entry->handle = 0;
out_entry->memory_address = 0;
out_entry->memory_size = 0;
- out_entry->offset = offset;
- out_entry->size = actual_size;
+ out_entry->range.offset = offset;
+ out_entry->range.size = actual_size;
+ out_entry->lru_counter = 0;
}
- /* Ensure that we ended up with a coherent out range. */
- AMS_ASSERT(out_range->second >= out_entry->size);
+ /* Check that we ended up with a coherent out range. */
+ AMS_ASSERT(out_range->second >= out_entry->range.size);
- return ResultSuccess();
+ R_SUCCEED();
}
- void BlockCacheBufferedStorage::DestroyBuffer(CacheEntry *entry, const MemoryRange &range) {
- /* Validate pre-conditions. */
- AMS_ASSERT(m_buffer_manager != nullptr);
- AMS_ASSERT(entry != nullptr);
-
- /* Set the entry as invalid and not cached. */
- entry->is_cached = false;
- entry->is_valid = false;
-
- /* Release the entry. */
- m_buffer_manager->DeallocateBuffer(range.first, range.second);
- }
-
- Result BlockCacheBufferedStorage::StoreAssociateBuffer(CacheIndex *out, const MemoryRange &range, const CacheEntry &entry) {
+ Result BlockCacheBufferedStorage::StoreOrDestroyBuffer(CacheIndex *out, const MemoryRange &range, CacheEntry *entry) {
/* Validate pre-conditions. */
AMS_ASSERT(out != nullptr);
/* Lock our mutex. */
std::scoped_lock lk(*m_mutex);
+ /* In the event that we fail, release our buffer. */
+ ON_RESULT_FAILURE { m_block_cache_manager.ReleaseCacheEntry(entry, range); };
+
/* If the entry is write-back, ensure we don't exceed certain dirtiness thresholds. */
- if (entry.is_write_back) {
+ if (entry->is_write_back) {
R_TRY(this->ControlDirtiness());
}
- /* Get the maximum cache entry count. */
- const CacheIndex max_cache_entry_count = static_cast<CacheIndex>(this->GetMaxCacheEntryCount());
- AMS_ASSERT(max_cache_entry_count > 0);
-
- /* Locate the index of an unused cache entry. */
- CacheIndex index;
- for (index = 0; index < max_cache_entry_count; ++index) {
- if (!m_entries[index].is_valid) {
- break;
- }
- }
+ /* Get unused cache entry index. */
+ CacheIndex empty_index, lru_index;
+ m_block_cache_manager.GetEmptyCacheEntryIndex(std::addressof(empty_index), std::addressof(lru_index));
/* If all entries are valid, we need to invalidate one. */
- if (index == max_cache_entry_count) {
- /* Increment the index to invalidate. */
- m_invalidate_index = (m_invalidate_index + 1) % max_cache_entry_count;
+ if (empty_index == BlockCacheManager::InvalidCacheIndex) {
+ /* Invalidate the least recently used entry. */
+ empty_index = lru_index;
- /* Get the entry to invalidate. */
- const CacheEntry *entry_to_invalidate = std::addressof(m_entries[m_invalidate_index]);
+ /* Get the entry to invalidate, sanity check that we can invalidate it. */
+ const CacheEntry &entry_to_invalidate = m_block_cache_manager[empty_index];
+ AMS_ASSERT(entry_to_invalidate.is_valid);
+ AMS_ASSERT(!entry_to_invalidate.is_flushing);
AMS_UNUSED(entry_to_invalidate);
- /* Ensure that the entry can be invalidated. */
- AMS_ASSERT(entry_to_invalidate->is_valid);
- AMS_ASSERT(!entry_to_invalidate->is_flushing);
-
/* Invalidate the entry. */
- R_TRY(this->FlushCacheEntry(m_invalidate_index, true));
+ R_TRY(this->FlushCacheEntry(empty_index, true));
/* Check that the entry was invalidated successfully. */
- AMS_ASSERT(!entry_to_invalidate->is_valid);
- AMS_ASSERT(!entry_to_invalidate->is_flushing);
-
- index = m_invalidate_index;
+ AMS_ASSERT(!entry_to_invalidate.is_valid);
+ AMS_ASSERT(!entry_to_invalidate.is_flushing);
}
/* Store the entry. */
- CacheEntry *entry_ptr = std::addressof(m_entries[index]);
- *entry_ptr = entry;
-
- /* Assert that the entry is valid to store. */
- AMS_ASSERT(entry_ptr->is_valid);
- AMS_ASSERT(entry_ptr->is_cached);
- AMS_ASSERT(entry_ptr->handle == 0);
- AMS_ASSERT(entry_ptr->memory_address == 0);
-
- /* Ensure that the new entry isn't redundant. */
- if (!ExistsRedundantCacheEntry(*entry_ptr)) {
- /* Store the cache's buffer. */
- if (entry_ptr->is_write_back) {
- entry_ptr->handle = 0;
- entry_ptr->memory_address = range.first;
- entry_ptr->memory_size = range.second;
- } else {
- entry_ptr->handle = m_buffer_manager->RegisterCache(range.first, range.second, IBufferManager::BufferAttribute(m_buffer_level));
- entry_ptr->memory_address = 0;
- entry_ptr->memory_size = 0;
- }
-
- /* Set the out index. */
- AMS_ASSERT(entry_ptr->is_valid);
- *out = index;
- m_invalidate_index = index;
+ if (m_block_cache_manager.SetCacheEntry(empty_index, *entry, range, fs::IBufferManager::BufferAttribute(m_buffer_level))) {
+ *out = empty_index;
} else {
- /* If a redundant entry exists, we don't need the newly stored entry. */
- m_buffer_manager->DeallocateBuffer(range.first, range.second);
- entry_ptr->is_valid = false;
- *out = -1;
+ *out = BlockCacheManager::InvalidCacheIndex;
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::FlushCacheEntry(CacheIndex index, bool invalidate) {
/* Lock our mutex. */
std::scoped_lock lk(*m_mutex);
- /* Get the entry. */
- CacheEntry *entry = std::addressof(m_entries[index]);
- MemoryRange memory_range;
-
- /* Check that the entry's state allows for flush. */
- AMS_ASSERT(entry->is_valid);
- AMS_ASSERT(!entry->is_flushing);
+ /* Get the entry, sanity check that the entry's state allows for flush. */
+ auto &entry = m_block_cache_manager[index];
+ AMS_ASSERT(entry.is_valid);
+ AMS_ASSERT(!entry.is_flushing);
/* If we're not write back (i.e. an invalidate is happening), just release the buffer. */
- if (!entry->is_write_back) {
+ if (!entry.is_write_back) {
AMS_ASSERT(invalidate);
- /* Get and release the buffer. */
- memory_range = m_buffer_manager->AcquireCache(entry->handle);
- if (memory_range.first != 0) {
- m_buffer_manager->DeallocateBuffer(memory_range.first, memory_range.second);
- }
+ m_block_cache_manager.InvalidateCacheEntry(index);
- /* The entry is no longer valid. */
- entry->is_valid = false;
-
- return ResultSuccess();
+ R_SUCCEED();
}
- /* Note that we've started flushing. */
- entry->is_flushing = true;
+ /* Note that we've started flushing, while we process. */
+ m_block_cache_manager.SetFlushing(index, true);
+ ON_SCOPE_EXIT { m_block_cache_manager.SetFlushing(index, false); };
/* Create and check our memory range. */
- memory_range = std::make_pair(entry->memory_address, entry->memory_size);
+ MemoryRange memory_range = fs::IBufferManager::MakeMemoryRange(entry.memory_address, entry.memory_size);
AMS_ASSERT(memory_range.first != 0);
- AMS_ASSERT(memory_range.second >= entry->size);
+ AMS_ASSERT(memory_range.second >= entry.range.size);
/* Validate the entry's offset. */
- AMS_ASSERT(entry->offset >= 0);
- AMS_ASSERT(entry->offset < m_data_size);
- AMS_ASSERT(util::IsAligned(entry->offset, m_verification_block_size));
+ AMS_ASSERT(entry.range.offset >= 0);
+ AMS_ASSERT(entry.range.offset < m_data_size);
+ AMS_ASSERT(util::IsAligned(entry.range.offset, m_verification_block_size));
/* Write back the data. */
Result result = ResultSuccess();
- size_t write_size = entry->size;
+ size_t write_size = entry.range.size;
if (R_SUCCEEDED(m_last_result)) {
/* Set blocking buffer manager allocations. */
- result = m_data_storage->Write(entry->offset, reinterpret_cast<const void *>(memory_range.first), write_size);
+ result = m_data_storage->Write(entry.range.offset, reinterpret_cast<const void *>(memory_range.first), write_size);
/* Check the result. */
AMS_ASSERT(!fs::ResultBufferAllocationFailed::Includes(result));
@@ -902,41 +767,34 @@ namespace ams::fssystem::save {
}
/* Set that we're not write-back. */
- entry->is_write_back = false;
+ m_block_cache_manager.SetWriteBack(index, false);
/* If we're invalidating, release the buffer. Otherwise, register the flushed data. */
if (invalidate) {
- m_buffer_manager->DeallocateBuffer(memory_range.first, memory_range.second);
- entry->is_valid = false;
- entry->is_flushing = false;
+ m_block_cache_manager.ReleaseCacheEntry(index, memory_range);
} else {
- AMS_ASSERT(entry->is_valid);
-
- entry->handle = m_buffer_manager->RegisterCache(memory_range.first, memory_range.second, IBufferManager::BufferAttribute(m_buffer_level));
-
- entry->memory_address = 0;
- entry->memory_size = 0;
- entry->is_flushing = false;
+ AMS_ASSERT(entry.is_valid);
+ m_block_cache_manager.RegisterCacheEntry(index, memory_range, fs::IBufferManager::BufferAttribute(m_buffer_level));
}
/* Try to succeed. */
R_TRY(result);
/* We succeeded. */
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::FlushRangeCacheEntries(s64 offset, s64 size, bool invalidate) {
/* Validate pre-conditions. */
AMS_ASSERT(m_data_storage != nullptr);
- AMS_ASSERT(m_buffer_manager != nullptr);
+ AMS_ASSERT(m_block_cache_manager.IsInitialized());
/* Iterate over all entries that fall within the range. */
Result result = ResultSuccess();
- const auto max_cache_entry_count = this->GetMaxCacheEntryCount();
+ const auto max_cache_entry_count = m_block_cache_manager.GetCount();
for (auto i = 0; i < max_cache_entry_count; ++i) {
- auto &entry = m_entries[i];
- if (entry.is_valid && (entry.is_write_back || invalidate) && (entry.offset < (offset + size)) && (offset < static_cast<s64>(entry.offset + entry.size))) {
+ auto &entry = m_block_cache_manager[i];
+ if (entry.is_valid && (entry.is_write_back || invalidate) && (entry.range.offset < (offset + size)) && (offset < entry.range.GetEndOffset())) {
const auto cur_result = this->FlushCacheEntry(i, invalidate);
if (R_FAILED(cur_result) && R_SUCCEEDED(result)) {
result = cur_result;
@@ -948,81 +806,52 @@ namespace ams::fssystem::save {
R_TRY(result);
/* We succeeded. */
- return ResultSuccess();
- }
-
- void BlockCacheBufferedStorage::InvalidateRangeCacheEntries(s64 offset, s64 size) {
- /* Validate pre-conditions. */
- AMS_ASSERT(m_data_storage != nullptr);
- AMS_ASSERT(m_buffer_manager != nullptr);
-
- /* Iterate over all entries that fall within the range. */
- const auto max_cache_entry_count = this->GetMaxCacheEntryCount();
- for (auto i = 0; i < max_cache_entry_count; ++i) {
- auto &entry = m_entries[i];
- if (entry.is_valid && (entry.offset < (offset + size)) && (offset < static_cast<s64>(entry.offset + entry.size))) {
- if (entry.is_write_back) {
- AMS_ASSERT(entry.memory_address != 0 && entry.handle == 0);
- m_buffer_manager->DeallocateBuffer(entry.memory_address, entry.memory_size);
- } else {
- AMS_ASSERT(entry.memory_address == 0 && entry.handle != 0);
- const auto memory_range = m_buffer_manager->AcquireCache(entry.handle);
- if (memory_range.first != 0) {
- m_buffer_manager->DeallocateBuffer(memory_range.first, memory_range.second);
- }
- }
-
- /* Mark the entry as invalidated. */
- entry.is_valid = false;
- entry.is_write_back = false;
- entry.is_flushing = true;
- }
- }
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::FlushAllCacheEntries() {
R_TRY(this->FlushRangeCacheEntries(0, std::numeric_limits<s64>::max(), false));
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::InvalidateAllCacheEntries() {
R_TRY(this->FlushRangeCacheEntries(0, std::numeric_limits<s64>::max(), true));
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::ControlDirtiness() {
/* Get and validate the max cache entry count. */
- const auto max_cache_entry_count = this->GetMaxCacheEntryCount();
+ const auto max_cache_entry_count = m_block_cache_manager.GetCount();
AMS_ASSERT(max_cache_entry_count > 0);
/* Get size metrics from the buffer manager. */
- const auto total_size = m_buffer_manager->GetTotalSize();
- const auto allocatable_size = m_buffer_manager->GetTotalAllocatableSize();
+ const auto total_size = m_block_cache_manager.GetAllocator()->GetTotalSize();
+ const auto allocatable_size = m_block_cache_manager.GetAllocator()->GetTotalAllocatableSize();
/* If we have enough allocatable space, we don't need to do anything. */
R_SUCCEED_IF(allocatable_size >= total_size / 4);
- /* Setup for flushing dirty entries. */
- auto threshold = 2;
- auto dirty_count = 0;
- auto flushed_index = m_invalidate_index;
-
- /* Iterate over all entries (starting with the invalidate index), and flush dirty entries once threshold is met. */
- for (auto i = 0; i < max_cache_entry_count; ++i) {
- auto index = (m_invalidate_index + 1 + i) % max_cache_entry_count;
- if (m_entries[index].is_valid && m_entries[index].is_write_back) {
- ++dirty_count;
- if (threshold <= dirty_count) {
- R_TRY(this->FlushCacheEntry(index, false));
- flushed_index = index;
+ /* Iterate over all entries (up to the threshold) and flush the least recently used dirty entry. */
+ constexpr auto Threshold = 2;
+ for (int n = 0; n < Threshold; ++n) {
+ auto flushed_index = BlockCacheManager::InvalidCacheIndex;
+ for (auto index = 0; index < max_cache_entry_count; ++index) {
+ if (auto &entry = m_block_cache_manager[index]; entry.is_valid && entry.is_write_back) {
+ if (flushed_index == BlockCacheManager::InvalidCacheIndex || m_block_cache_manager[flushed_index].lru_counter < entry.lru_counter) {
+ flushed_index = index;
+ }
}
}
+
+ /* If we can't flush anything, break. */
+ if (flushed_index == BlockCacheManager::InvalidCacheIndex) {
+ break;
+ }
+
+ R_TRY(this->FlushCacheEntry(flushed_index, false));
}
- /* Update the invalidate index. */
- m_invalidate_index = flushed_index;
-
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::UpdateLastResult(Result result) {
@@ -1035,7 +864,7 @@ namespace ams::fssystem::save {
R_TRY(result);
/* We succeeded. */
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::ReadHeadCache(MemoryRange *out_range, CacheEntry *out_entry, bool *out_cache_needed, s64 *offset, s64 *aligned_offset, s64 aligned_offset_end, char **buffer, size_t *size) {
@@ -1069,8 +898,8 @@ namespace ams::fssystem::save {
*out_cache_needed = false;
/* Determine the size to copy. */
- const s64 buffer_offset = *offset - entry.offset;
- const size_t copy_size = std::min(*size, static_cast<size_t>(entry.offset + entry.size - *offset));
+ const s64 buffer_offset = *offset - entry.range.offset;
+ const size_t copy_size = std::min(*size, static_cast<size_t>(entry.range.GetEndOffset() - *offset));
/* Copy data from the entry. */
std::memcpy(*buffer, reinterpret_cast<const void *>(memory_range.first + buffer_offset), copy_size);
@@ -1079,7 +908,7 @@ namespace ams::fssystem::save {
*buffer += copy_size;
*offset += copy_size;
*size -= copy_size;
- *aligned_offset = entry.offset + entry.size;
+ *aligned_offset = entry.range.GetEndOffset();
/* Handle the buffer. */
R_TRY(this->UpdateLastResult(this->StoreOrDestroyBuffer(memory_range, std::addressof(entry))));
@@ -1089,7 +918,7 @@ namespace ams::fssystem::save {
*out_entry = entry;
*out_range = memory_range;
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::ReadTailCache(MemoryRange *out_range, CacheEntry *out_entry, bool *out_cache_needed, s64 offset, s64 aligned_offset, s64 *aligned_offset_end, char *buffer, size_t *size) {
@@ -1121,15 +950,15 @@ namespace ams::fssystem::save {
*out_cache_needed = false;
/* Determine the size to copy. */
- const s64 buffer_offset = std::max(static_cast<s64>(0), offset - entry.offset);
- const size_t copy_size = std::min(*size, static_cast<size_t>(offset + *size - entry.offset));
+ const s64 buffer_offset = std::max(static_cast<s64>(0), offset - entry.range.offset);
+ const size_t copy_size = std::min(*size, static_cast<size_t>(offset + *size - entry.range.offset));
/* Copy data from the entry. */
std::memcpy(buffer + *size - copy_size, reinterpret_cast<const void *>(memory_range.first + buffer_offset), copy_size);
/* Advance. */
*size -= copy_size;
- *aligned_offset_end = entry.offset;
+ *aligned_offset_end = entry.range.offset;
/* Handle the buffer. */
R_TRY(this->UpdateLastResult(this->StoreOrDestroyBuffer(memory_range, std::addressof(entry))));
@@ -1139,7 +968,7 @@ namespace ams::fssystem::save {
*out_entry = entry;
*out_range = memory_range;
- return ResultSuccess();
+ R_SUCCEED();
}
Result BlockCacheBufferedStorage::BulkRead(s64 offset, void *buffer, size_t size, MemoryRange *range_head, MemoryRange *range_tail, CacheEntry *entry_head, CacheEntry *entry_tail, bool head_cache_needed, bool tail_cache_needed) {
@@ -1158,8 +987,8 @@ namespace ams::fssystem::save {
char *dst = static_cast<char *>(buffer);
/* Prepare to do our reads. */
- auto head_guard = SCOPE_GUARD { this->DestroyBuffer(entry_head, *range_head); };
- auto tail_guard = SCOPE_GUARD { this->DestroyBuffer(entry_tail, *range_tail); };
+ auto head_guard = SCOPE_GUARD { m_block_cache_manager.ReleaseCacheEntry(entry_head, *range_head); };
+ auto tail_guard = SCOPE_GUARD { m_block_cache_manager.ReleaseCacheEntry(entry_tail, *range_tail); };
/* Flush the entries. */
R_TRY(this->UpdateLastResult(this->FlushRangeCacheEntries(aligned_offset, aligned_offset_end - aligned_offset, false)));
@@ -1170,9 +999,9 @@ namespace ams::fssystem::save {
char *read_buffer = nullptr;
if (read_offset == aligned_offset && read_size == buffer_size) {
read_buffer = dst;
- } else if (tail_cache_needed && entry_tail->offset == aligned_offset && entry_tail->size == buffer_size) {
+ } else if (tail_cache_needed && entry_tail->range.offset == aligned_offset && entry_tail->range.size == buffer_size) {
read_buffer = reinterpret_cast<char *>(range_tail->first);
- } else if (head_cache_needed && entry_head->offset == aligned_offset && entry_head->size == buffer_size) {
+ } else if (head_cache_needed && entry_head->range.offset == aligned_offset && entry_head->range.size == buffer_size) {
read_buffer = reinterpret_cast<char *>(range_head->first);
} else {
pooled_buffer.AllocateParticularlyLarge(buffer_size, 1);
@@ -1193,10 +1022,10 @@ namespace ams::fssystem::save {
AMS_ASSERT(entry != nullptr);
AMS_ASSERT(range != nullptr);
- if (aligned_offset <= entry->offset && entry->offset + entry->size <= aligned_offset + buffer_size) {
+ if (aligned_offset <= entry->range.offset && entry->range.GetEndOffset() <= static_cast<s64>(aligned_offset + buffer_size)) {
AMS_ASSERT(!entry->is_cached);
if (reinterpret_cast<char *>(range->first) != read_buffer) {
- std::memcpy(reinterpret_cast<char *>(range->first), read_buffer + entry->offset - aligned_offset, entry->size);
+ std::memcpy(reinterpret_cast<char *>(range->first), read_buffer + entry->range.offset - aligned_offset, entry->range.size);
}
entry->is_cached = true;
}
@@ -1214,9 +1043,9 @@ namespace ams::fssystem::save {
/* If both entries are cached, one may contain the other; in that case, we need only the larger entry. */
if (entry_head->is_cached && entry_tail->is_cached) {
- if (entry_tail->offset <= entry_head->offset && entry_head->offset + entry_head->size <= entry_tail->offset + entry_tail->size) {
+ if (entry_tail->range.offset <= entry_head->range.offset && entry_head->range.GetEndOffset() <= entry_tail->range.GetEndOffset()) {
entry_head->is_cached = false;
- } else if (entry_head->offset <= entry_tail->offset && entry_tail->offset + entry_tail->size <= entry_head->offset + entry_head->size) {
+ } else if (entry_head->range.offset <= entry_tail->range.offset && entry_tail->range.GetEndOffset() <= entry_head->range.GetEndOffset()) {
entry_tail->is_cached = false;
}
}
@@ -1226,7 +1055,7 @@ namespace ams::fssystem::save {
if (entry_tail->is_cached) {
R_TRY(this->UpdateLastResult(this->StoreOrDestroyBuffer(*range_tail, entry_tail)));
} else {
- this->DestroyBuffer(entry_tail, *range_tail);
+ m_block_cache_manager.ReleaseCacheEntry(entry_tail, *range_tail);
}
/* Destroy the head cache. */
@@ -1234,10 +1063,10 @@ namespace ams::fssystem::save {
if (entry_head->is_cached) {
R_TRY(this->UpdateLastResult(this->StoreOrDestroyBuffer(*range_head, entry_head)));
} else {
- this->DestroyBuffer(entry_head, *range_head);
+ m_block_cache_manager.ReleaseCacheEntry(entry_head, *range_head);
}
- return ResultSuccess();
+ R_SUCCEED();
}
}
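Illustrative sketch (not part of the patch): this file now leans on ON_RESULT_FAILURE (cleanup that runs only when the function exits with a failing Result) and ON_SCOPE_EXIT (cleanup that always runs) instead of named SCOPE_GUARDs that must be cancelled by hand on the success path. Assuming those semantics, the two styles compare roughly as follows; Allocator and UseBuffer are hypothetical:

    Result OldStyle(Allocator &allocator) {
        void *buf = allocator.Allocate(0x100);
        R_UNLESS(buf != nullptr, fs::ResultBufferAllocationFailed());
        auto guard = SCOPE_GUARD { allocator.Free(buf); };

        R_TRY(UseBuffer(buf));   /* On failure, the guard frees the buffer. */

        guard.Cancel();          /* On success, the buffer is kept, so the guard must be cancelled explicitly. */
        return ResultSuccess();
    }

    Result NewStyle(Allocator &allocator) {
        void *buf = allocator.Allocate(0x100);
        R_UNLESS(buf != nullptr, fs::ResultBufferAllocationFailed());
        ON_RESULT_FAILURE { allocator.Free(buf); };   /* No explicit Cancel() needed on the success path. */

        R_TRY(UseBuffer(buf));

        R_SUCCEED();
    }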
diff --git a/libraries/libstratosphere/source/fssystem/save/fssystem_buffered_storage.cpp b/libraries/libstratosphere/source/fssystem/save/fssystem_buffered_storage.cpp
index abea74829..57f5153d2 100644
--- a/libraries/libstratosphere/source/fssystem/save/fssystem_buffered_storage.cpp
+++ b/libraries/libstratosphere/source/fssystem/save/fssystem_buffered_storage.cpp
@@ -35,7 +35,7 @@ namespace ams::fssystem::save {
private:
BufferedStorage *m_buffered_storage;
std::pair<uintptr_t, size_t> m_memory_range;
- IBufferManager::CacheHandle m_cache_handle;
+ fs::IBufferManager::CacheHandle m_cache_handle;
s64 m_offset;
std::atomic<bool> m_is_valid;
std::atomic<bool> m_is_dirty;
@@ -139,7 +139,7 @@ namespace ams::fssystem::save {
/* Ensure our buffer state is coherent. */
if (m_memory_range.first != InvalidAddress && !m_is_dirty) {
if (this->IsValid()) {
- m_cache_handle = m_buffered_storage->m_buffer_manager->RegisterCache(m_memory_range.first, m_memory_range.second, IBufferManager::BufferAttribute());
+ m_cache_handle = m_buffered_storage->m_buffer_manager->RegisterCache(m_memory_range.first, m_memory_range.second, fs::IBufferManager::BufferAttribute());
} else {
m_buffered_storage->m_buffer_manager->DeallocateBuffer(m_memory_range.first, m_memory_range.second);
}
@@ -360,11 +360,11 @@ namespace ams::fssystem::save {
}
private:
Result AllocateFetchBuffer() {
- IBufferManager *buffer_manager = m_buffered_storage->m_buffer_manager;
+ fs::IBufferManager *buffer_manager = m_buffered_storage->m_buffer_manager;
AMS_ASSERT(buffer_manager->AcquireCache(m_cache_handle).first == InvalidAddress);
auto range_guard = SCOPE_GUARD { m_memory_range.first = InvalidAddress; };
- R_TRY(buffers::AllocateBufferUsingBufferManagerContext(std::addressof(m_memory_range), buffer_manager, m_buffered_storage->m_block_size, IBufferManager::BufferAttribute(), [](const std::pair<uintptr_t, size_t> &buffer) {
+ R_TRY(buffers::AllocateBufferUsingBufferManagerContext(std::addressof(m_memory_range), buffer_manager, m_buffered_storage->m_block_size, fs::IBufferManager::BufferAttribute(), [](const std::pair<uintptr_t, size_t> &buffer) {
return buffer.first != 0;
}, AMS_CURRENT_FUNCTION_NAME));
@@ -591,7 +591,7 @@ namespace ams::fssystem::save {
this->Finalize();
}
- Result BufferedStorage::Initialize(fs::SubStorage base_storage, IBufferManager *buffer_manager, size_t block_size, s32 buffer_count) {
+ Result BufferedStorage::Initialize(fs::SubStorage base_storage, fs::IBufferManager *buffer_manager, size_t block_size, s32 buffer_count) {
AMS_ASSERT(buffer_manager != nullptr);
AMS_ASSERT(block_size > 0);
AMS_ASSERT(util::IsPowerOfTwo(block_size));
diff --git a/libraries/libstratosphere/source/fssystem/save/fssystem_integrity_verification_storage.cpp b/libraries/libstratosphere/source/fssystem/save/fssystem_integrity_verification_storage.cpp
index 65ab21b2c..6910ecff8 100644
--- a/libraries/libstratosphere/source/fssystem/save/fssystem_integrity_verification_storage.cpp
+++ b/libraries/libstratosphere/source/fssystem/save/fssystem_integrity_verification_storage.cpp
@@ -17,7 +17,7 @@
namespace ams::fssystem::save {
- Result IntegrityVerificationStorage::Initialize(fs::SubStorage hs, fs::SubStorage ds, s64 verif_block_size, s64 upper_layer_verif_block_size, IBufferManager *bm, fssystem::IHash256GeneratorFactory *hgf, const fs::HashSalt &salt, bool is_real_data, fs::StorageType storage_type) {
+ Result IntegrityVerificationStorage::Initialize(fs::SubStorage hs, fs::SubStorage ds, s64 verif_block_size, s64 upper_layer_verif_block_size, fs::IBufferManager *bm, fssystem::IHash256GeneratorFactory *hgf, const fs::HashSalt &salt, bool is_real_data, fs::StorageType storage_type) {
/* Validate preconditions. */
AMS_ASSERT(verif_block_size >= HashSize);
AMS_ASSERT(bm != nullptr);
diff --git a/libraries/libvapours/include/vapours/results/fs_results.hpp b/libraries/libvapours/include/vapours/results/fs_results.hpp
index 0df09509d..466a8735d 100644
--- a/libraries/libvapours/include/vapours/results/fs_results.hpp
+++ b/libraries/libvapours/include/vapours/results/fs_results.hpp
@@ -307,6 +307,7 @@ namespace ams::fs {
R_DEFINE_ERROR_RESULT(UnexpectedInCompressedStorageA, 5324);
R_DEFINE_ERROR_RESULT(UnexpectedInCompressedStorageB, 5325);
R_DEFINE_ERROR_RESULT(UnexpectedInCompressedStorageC, 5326);
+ R_DEFINE_ERROR_RESULT(UnexpectedInCompressedStorageD, 5327);
R_DEFINE_ERROR_RESULT(UnexpectedInPathA, 5328);
R_DEFINE_ERROR_RANGE(PreconditionViolation, 6000, 6499);
@@ -394,6 +395,7 @@ namespace ams::fs {
R_DEFINE_ERROR_RESULT(UnsupportedOperationInLocalFileA, 6378);
R_DEFINE_ERROR_RESULT(UnsupportedOperationInDirectorySaveDataFileSystemA, 6384);
R_DEFINE_ERROR_RESULT(UnsupportedOperationInCompressedStorageA, 6387);
+ R_DEFINE_ERROR_RESULT(UnsupportedOperationInCompressedStorageB, 6388);
R_DEFINE_ERROR_RANGE(PermissionDenied, 6400, 6449);
R_DEFINE_ERROR_RESULT(PermissionDeniedForCreateHostFileSystem, 6403);
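Illustrative sketch (not part of the patch): results declared with R_DEFINE_ERROR_RESULT become types that can be returned via R_UNLESS/R_THROW and matched with ::Includes, which is how the two CompressedStorage results added above would typically be consumed. The function names below are hypothetical:

    Result CheckCompressedBlock(bool header_ok, bool operation_supported) {
        /* Return one of the newly defined results on failure. */
        R_UNLESS(header_ok,           fs::ResultUnexpectedInCompressedStorageD());
        R_UNLESS(operation_supported, fs::ResultUnsupportedOperationInCompressedStorageB());
        R_SUCCEED();
    }

    void Example() {
        const Result result = CheckCompressedBlock(true, false);
        if (fs::ResultUnsupportedOperationInCompressedStorageB::Includes(result)) {
            /* Handle the specific failure (matched by result class, as done elsewhere in this patch). */
        }
    }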
diff --git a/libraries/libvapours/include/vapours/util.hpp b/libraries/libvapours/include/vapours/util.hpp
index 081273728..1b9991aa7 100644
--- a/libraries/libvapours/include/vapours/util.hpp
+++ b/libraries/libvapours/include/vapours/util.hpp
@@ -57,6 +57,8 @@
#include
+#include <vapours/util/util_i_function.hpp>
+
#ifdef ATMOSPHERE_IS_STRATOSPHERE
#include
#endif
diff --git a/libraries/libvapours/include/vapours/util/util_i_function.hpp b/libraries/libvapours/include/vapours/util/util_i_function.hpp
new file mode 100644
index 000000000..813da435a
--- /dev/null
+++ b/libraries/libvapours/include/vapours/util/util_i_function.hpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#pragma once
+#include
+#include
+
+namespace ams::util {
+
+ template<typename>
+ class IFunction;
+
+ namespace impl {
+
+ template<typename>
+ struct GetIFunctionTypeForObject;
+
+ template<typename F, typename R, typename... Args>
+ struct GetIFunctionTypeForObject<R (F::*)(Args...)> { using Type = R(Args...); };
+
+ template<typename F, typename R, typename... Args>
+ struct GetIFunctionTypeForObject<R (F::*)(Args...) const> { using Type = R(Args...); };
+
+ template<typename>
+ struct GetIFunctionType;
+
+ template<typename R, typename... Args>
+ struct GetIFunctionType<R(Args...)> { using Type = R(Args...); };
+
+ template
+ struct GetIFunctionType