[nstool] Improve Pfs/Romfs export time.
parent f4d3501f8c
commit 6960911ab0

8 changed files with 45 additions and 61 deletions
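In short: the export paths previously worked through 16 MiB scratch buffers (kFileExportBlockSize = 0x1000000), copying whole blocks and then handling the remainder in a separate pass; this commit switches them to a fixed 64 KiB cache (kCacheSize / kDefaultCacheSize = 0x10000) that is allocated once and reused, driven by a single ceil-division loop that clamps the final chunk. A minimal standalone sketch of the new copy pattern, using C stdio in place of nstool's fnd::IFile / fnd::SimpleFile ("input.bin" and "output.bin" are hypothetical file names):

    // Sketch of the new chunked copy loop; kCacheSize matches the commit.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    static const size_t kCacheSize = 0x10000; // 64 KiB, as in PfsProcess.h

    int main()
    {
        std::FILE* in = std::fopen("input.bin", "rb");
        std::FILE* out = std::fopen("output.bin", "wb");
        if (!in || !out) return 1;

        std::fseek(in, 0, SEEK_END);
        size_t size = (size_t)std::ftell(in);
        std::fseek(in, 0, SEEK_SET);

        std::vector<unsigned char> cache(kCacheSize); // allocated once, reused

        // ceil(size / kCacheSize) iterations, as in the new for-condition
        for (size_t j = 0; j < ((size / kCacheSize) + ((size % kCacheSize) != 0)); j++)
        {
            // the last chunk may be short: MIN(size - kCacheSize*j, kCacheSize)
            size_t len = std::min(size - (kCacheSize * j), kCacheSize);
            std::fread(cache.data(), 1, len, in);
            std::fwrite(cache.data(), 1, len, out);
        }

        std::fclose(in);
        std::fclose(out);
        return 0;
    }

The hunks below apply this same pattern to HashTreeWrappedIFile, PfsProcess and RomfsProcess.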
HashTreeWrappedIFile.cpp

@@ -50,12 +50,9 @@ void HashTreeWrappedIFile::read(byte_t* out, size_t len)
 	size_t start_block = getOffsetBlock(mDataOffset);
 	size_t block_num = align(offset_in_start_block + len, mDataBlockSize) / mDataBlockSize;
 
-	size_t scratch_block_capacity = mScratch.getSize() / mDataBlockSize;
-
-	size_t partial_last_block_num = block_num % scratch_block_capacity;
+	size_t partial_last_block_num = block_num % mCacheBlockNum;
 	bool has_partial_block_num = partial_last_block_num > 0;
-	size_t read_iterations = (block_num / scratch_block_capacity) + has_partial_block_num;
-
+	size_t read_iterations = (block_num / mCacheBlockNum) + has_partial_block_num;
 
 	size_t block_read_len;
 	size_t block_export_offset;
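The arithmetic above batches a block-aligned read through the fixed cache: one full batch per mCacheBlockNum blocks, plus one short batch when the block count does not divide evenly. A standalone sketch (names follow the diff):

    #include <cstdio>

    // batches needed to pull block_num blocks through a cache holding
    // cache_block_num blocks at a time (mirrors the new read() arithmetic)
    size_t readIterations(size_t block_num, size_t cache_block_num)
    {
        size_t partial_last_block_num = block_num % cache_block_num;
        bool has_partial_block_num = partial_last_block_num > 0;
        return (block_num / cache_block_num) + has_partial_block_num;
    }

    int main()
    {
        // e.g. 70 blocks through a 16-block cache: 4 full batches + 1 of 6
        std::printf("%zu\n", readIterations(70, 16)); // prints 5
        return 0;
    }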
@@ -64,7 +61,7 @@ void HashTreeWrappedIFile::read(byte_t* out, size_t len)
 	for (size_t i = 0; i < read_iterations; i++)
 	{
 		// how many blocks to read from source file
-		block_read_len = (i+1 == read_iterations && has_partial_block_num) ? partial_last_block_num : scratch_block_capacity;
+		block_read_len = (i+1 == read_iterations && has_partial_block_num) ? partial_last_block_num : mCacheBlockNum;
 
 		// offset in this current read to copy from
 		block_export_offset = (i == 0) ? offset_in_start_block : 0;
@@ -79,10 +76,10 @@ void HashTreeWrappedIFile::read(byte_t* out, size_t len)
 		}
 
 		// read the blocks
-		readData(start_block + (i * scratch_block_capacity), block_read_len);
+		readData(start_block + (i * mCacheBlockNum), block_read_len);
 
 		// export the section of data that is relevant
-		memcpy(out + block_export_pos, mScratch.getBytes() + block_export_offset, block_export_size);
+		memcpy(out + block_export_pos, mCache.getBytes() + block_export_offset, block_export_size);
 
 		// update export position
 		block_export_pos += block_export_size;
@@ -165,7 +162,11 @@ void HashTreeWrappedIFile::initialiseDataLayer(const HashTreeMeta& hdr)
 
 	// allocate scratchpad
-	//mScratch.alloc(mDataBlockSize * 0x10);
-	mScratch.alloc(align(kFileExportBlockSize, mDataBlockSize));
+	size_t cache_size = align(kDefaultCacheSize, mDataBlockSize);
+	mCacheBlockNum = cache_size / mDataBlockSize;
+	//printf("Block Size: 0x%" PRIx64 "\n", mDataBlockSize);
+	//printf("Cache size: 0x%" PRIx64 ", (block_num: %" PRId64 ")\n", cache_size, mCacheBlockNum);
+	mCache.alloc(cache_size);
 }
 
 void HashTreeWrappedIFile::readData(size_t block_offset, size_t block_num)
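initialiseDataLayer() now sizes the cache once, rounding kDefaultCacheSize up to a whole number of hash-tree blocks so every read stays block-aligned. A sketch of that arithmetic, assuming align() rounds its first argument up to a multiple of its second (the block size here is hypothetical):

    #include <cstdio>

    // assumed semantics of align(): round size up to a multiple of alignment
    size_t align(size_t size, size_t alignment)
    {
        return ((size + alignment - 1) / alignment) * alignment;
    }

    int main()
    {
        const size_t kDefaultCacheSize = 0x10000; // from the new header
        size_t data_block_size = 0x4000;          // hypothetical block size
        size_t cache_size = align(kDefaultCacheSize, data_block_size);
        size_t cache_block_num = cache_size / data_block_size;
        std::printf("cache=0x%zx, blocks=%zu\n", cache_size, cache_block_num);
        // prints: cache=0x10000, blocks=4
        return 0;
    }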
@@ -178,7 +179,7 @@ void HashTreeWrappedIFile::readData(size_t block_offset, size_t block_num)
 	if ((block_offset + block_num) == getBlockNum(mData->size()))
 	{
 		read_len = (block_num-1) * mDataBlockSize + getRemanderBlockReadSize(mData->size());
-		memset(mScratch.getBytes(), 0, block_num * mDataBlockSize);
+		memset(mCache.getBytes(), 0, block_num * mDataBlockSize);
 	}
 	else if ((block_offset + block_num) < getBlockNum(mData->size()))
 	{
@@ -190,19 +191,24 @@ void HashTreeWrappedIFile::readData(size_t block_offset, size_t block_num)
 	}
 
 	// read
-	mData->read(mScratch.getBytes(), block_offset * mDataBlockSize, read_len);
+	mData->read(mCache.getBytes(), block_offset * mDataBlockSize, read_len);
+
+	if (block_num > mCacheBlockNum)
+	{
+		throw fnd::Exception(kModuleName, "Read excessive of cache size");
+	}
 
 	//printf("readlen=0x%" PRIx64 "\n", read_len);
 
 	// validate blocks
 	size_t validate_size;
 	for (size_t i = 0; i < block_num; i++)
 	{
 		validate_size = mAlignHashCalcToBlock? mDataBlockSize : MIN(read_len - (i * mDataBlockSize), mDataBlockSize);
-		crypto::sha::Sha256(mScratch.getBytes() + (i * mDataBlockSize), validate_size, hash.bytes);
+		crypto::sha::Sha256(mCache.getBytes() + (i * mDataBlockSize), validate_size, hash.bytes);
 		if (hash != mDataHashLayer[block_offset + i])
 		{
-			mErrorSs << "Hash tree layer verification failed (layer: data, block: " << (block_offset + i) << ", offset: 0x" << std::hex << ((block_offset + i) * mDataBlockSize) << ", size: 0x" << std::hex << validate_size <<")";
+			mErrorSs << "Hash tree layer verification failed (layer: data, block: " << (block_offset + i) << " ( " << i << "/" << block_num-1 << " ), offset: 0x" << std::hex << ((block_offset + i) * mDataBlockSize) << ", size: 0x" << std::hex << validate_size <<")";
 			throw fnd::Exception(kModuleName, mErrorSs.str());
 		}
 	}
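The validation loop recomputes each cached block's digest and compares it against the stored data hash layer. The same pattern as a standalone sketch, with OpenSSL's one-shot SHA256() standing in for crypto::sha::Sha256 (the 32-bytes-per-entry layer layout is an assumption):

    #include <cstring>
    #include <stdexcept>
    #include <vector>
    #include <openssl/sha.h>

    // verify block_num blocks in cache against a flat hash layer
    // (32 bytes per block, indexed from block_offset)
    void verifyBlocks(const std::vector<unsigned char>& cache,
                      const std::vector<unsigned char>& hash_layer,
                      size_t block_offset, size_t block_num, size_t block_size)
    {
        unsigned char hash[SHA256_DIGEST_LENGTH];
        for (size_t i = 0; i < block_num; i++)
        {
            SHA256(cache.data() + (i * block_size), block_size, hash);
            const unsigned char* expected =
                hash_layer.data() + (block_offset + i) * SHA256_DIGEST_LENGTH;
            if (std::memcmp(hash, expected, SHA256_DIGEST_LENGTH) != 0)
                throw std::runtime_error("hash tree verification failed");
        }
    }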
HashTreeWrappedIFile.h

@@ -21,7 +21,7 @@ public:
 	void write(const byte_t* out, size_t offset, size_t len);
 private:
 	const std::string kModuleName = "HashTreeWrappedIFile";
-	static const size_t kFileExportBlockSize = 0x1000000;
+	static const size_t kDefaultCacheSize = 0x10000;
 	std::stringstream mErrorSs;
 
 	bool mOwnIFile;
@@ -34,7 +34,8 @@ private:
 	fnd::List<crypto::sha::sSha256Hash> mDataHashLayer;
 	bool mAlignHashCalcToBlock;
 
-	fnd::MemoryBlob mScratch;
+	fnd::MemoryBlob mCache;
+	size_t mCacheBlockNum;
 
 	inline size_t getOffsetBlock(size_t offset) const { return offset / mDataBlockSize; }
 	inline size_t getOffsetInBlock(size_t offset) const { return offset % mDataBlockSize; }
NcaProcess.cpp

@@ -681,7 +681,7 @@ void NcaProcess::displayHeader()
 	printf(" |-----|----------------------------------|----------------------------------|\n");
 	for (size_t i = 0; i < mBodyKeys.keak_list.getSize(); i++)
 	{
-		printf(" | %3lu | ", mBodyKeys.keak_list[i].index);
+		printf(" | %3d | ", mBodyKeys.keak_list[i].index);
 
 		_HEXDUMP_L(mBodyKeys.keak_list[i].enc.key, 16);
 		//for (size_t j = 0; j < 16; j++) printf("%02x", mBodyKeys.keak_list[i].enc.key[j]);
NpdmProcess.cpp

@@ -463,7 +463,7 @@ void NpdmProcess::displayAciHdr(const nx::AciHeader& aci)
 	else if (aci.getAciType() == nx::AciBinary::TYPE_ACID)
 	{
-		printf(" ACID Size: %" PRIx64 "\n", aci.getAcidSize());
+		printf(" ACID Size: %" PRIx64 "\n", (uint64_t)aci.getAcidSize());
 		printf(" Flags: \n");
 		printf(" Production: %s\n", aci.isProduction() ? "TRUE" : "FALSE");
 		printf(" UnqualifiedApproval: %s\n", aci.isUnqualifiedApproval() ? "TRUE" : "FALSE");
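These printf changes (here and in the Pfs/Romfs hunks below) are portability fixes: %" PRId64 " and %" PRIx64 " expect exactly a uint64_t, while getters like getAcidSize() and getSize() return size_t, which is 4 bytes on 32-bit targets, so the explicit casts make the argument width match the format. The key index, by contrast, is a small integer type that promotes to int under the default argument promotions, so %3d replaces the incorrect %3lu. A minimal illustration (values hypothetical):

    #include <cinttypes>
    #include <cstdio>

    int main()
    {
        size_t file_num = 42;
        // mismatched widths are undefined behaviour where size_t is 32-bit:
        // printf("FileNum: %" PRId64 "\n", file_num);
        // the cast makes the argument width match the format explicitly:
        std::printf("FileNum: %" PRId64 "\n", (uint64_t)file_num);

        unsigned char index = 3;         // e.g. a key index held in one byte
        std::printf("| %3d |\n", index); // promoted to int, so %d matches
        return 0;
    }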
PfsProcess.cpp

@@ -96,7 +96,7 @@ void PfsProcess::displayHeader()
 {
 	printf("[PartitionFS]\n");
 	printf(" Type: %s\n", mPfs.getFsType() == mPfs.TYPE_PFS0? "PFS0" : "HFS0");
-	printf(" FileNum: %" PRId64 "\n", mPfs.getFileList().getSize());
+	printf(" FileNum: %" PRId64 "\n", (uint64_t)mPfs.getFileList().getSize());
 	if (mMountName.empty() == false)
 		printf(" MountPoint: %s%s\n", mMountName.c_str(), mMountName.at(mMountName.length()-1) != '/' ? "/" : "");
 }
@@ -109,9 +109,9 @@ void PfsProcess::displayFs()
 	if (mCliOutputType >= OUTPUT_VERBOSE)
 	{
 		if (mPfs.getFsType() == mPfs.TYPE_PFS0)
-			printf(" (offset=0x%" PRIx64 ", size=0x%" PRIx64 ")\n", mPfs.getFileList()[i].offset, mPfs.getFileList()[i].size);
+			printf(" (offset=0x%" PRIx64 ", size=0x%" PRIx64 ")\n", (uint64_t)mPfs.getFileList()[i].offset, (uint64_t)mPfs.getFileList()[i].size);
 		else
-			printf(" (offset=0x%" PRIx64 ", size=0x%" PRIx64 ", hash_protected_size=0x%" PRIx64 ")\n", mPfs.getFileList()[i].offset, mPfs.getFileList()[i].size, mPfs.getFileList()[i].hash_protected_size);
+			printf(" (offset=0x%" PRIx64 ", size=0x%" PRIx64 ", hash_protected_size=0x%" PRIx64 ")\n", (uint64_t)mPfs.getFileList()[i].offset, (uint64_t)mPfs.getFileList()[i].size, (uint64_t)mPfs.getFileList()[i].hash_protected_size);
 	}
 	else
 	{
@@ -139,16 +139,13 @@ bool PfsProcess::validateHeaderMagic(const nx::sPfsHeader* hdr)
 
 void PfsProcess::validateHfs()
 {
-	// allocate when validate is invoked
-	mFileExtractBlock.alloc(kFileExportBlockSize);
-
 	crypto::sha::sSha256Hash hash;
 	const fnd::List<nx::PfsHeader::sFile>& file = mPfs.getFileList();
 	for (size_t i = 0; i < file.getSize(); i++)
 	{
-		mFileExtractBlock.alloc(file[i].hash_protected_size);
-		mReader->read(mFileExtractBlock.getBytes(), file[i].offset, file[i].hash_protected_size);
-		crypto::sha::Sha256(mFileExtractBlock.getBytes(), mFileExtractBlock.getSize(), hash.bytes);
+		mCache.alloc(file[i].hash_protected_size);
+		mReader->read(mCache.getBytes(), file[i].offset, file[i].hash_protected_size);
+		crypto::sha::Sha256(mCache.getBytes(), mCache.getSize(), hash.bytes);
 		if (hash != file[i].hash)
 		{
 			if (mCliOutputType >= OUTPUT_MINIMAL)
@@ -161,7 +158,7 @@ void PfsProcess::validateHfs()
 void PfsProcess::extractFs()
 {
 	// allocate only when extractDir is invoked
-	mFileExtractBlock.alloc(kFileExportBlockSize);
+	mCache.alloc(kCacheSize);
 
 	// make extract dir
 	fnd::io::makeDirectory(mExtractPath);
@@ -181,15 +178,10 @@ void PfsProcess::extractFs()
 
 		outFile.open(file_path, outFile.Create);
 		mReader->seek(file[i].offset);
-		for (size_t j = 0; j < (file[i].size / kFileExportBlockSize); j++)
+		for (size_t j = 0; j < ((file[i].size / kCacheSize) + ((file[i].size % kCacheSize) != 0)); j++)
 		{
-			mReader->read(mFileExtractBlock.getBytes(), kFileExportBlockSize);
-			outFile.write(mFileExtractBlock.getBytes(), kFileExportBlockSize);
-		}
-		if (file[i].size % kFileExportBlockSize)
-		{
-			mReader->read(mFileExtractBlock.getBytes(), file[i].size % kFileExportBlockSize);
-			outFile.write(mFileExtractBlock.getBytes(), file[i].size % kFileExportBlockSize);
+			mReader->read(mCache.getBytes(), MIN(file[i].size - (kCacheSize * j),kCacheSize));
+			outFile.write(mCache.getBytes(), MIN(file[i].size - (kCacheSize * j),kCacheSize));
 		}
 		outFile.close();
 	}
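The rewritten loop makes ceil(size / kCacheSize) passes and clamps the last chunk with MIN, replacing the old whole-blocks loop plus separate remainder branch. A quick standalone check that the chunk lengths cover a file exactly (the size is hypothetical):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main()
    {
        const size_t kCacheSize = 0x10000;
        size_t size = 0x25000; // hypothetical file size
        size_t total = 0;
        for (size_t j = 0; j < ((size / kCacheSize) + ((size % kCacheSize) != 0)); j++)
            total += std::min(size - (kCacheSize * j), kCacheSize);
        assert(total == size); // chunks: 0x10000, 0x10000, 0x5000
        return 0;
    }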
PfsProcess.h

@@ -28,7 +28,7 @@ public:
 
 private:
 	const std::string kModuleName = "PfsProcess";
-	static const size_t kFileExportBlockSize = 0x1000000;
+	static const size_t kCacheSize = 0x10000;
 
 	fnd::IFile* mReader;
 	CliOutputType mCliOutputType;
@@ -39,7 +39,7 @@ private:
 	std::string mMountName;
 	bool mListFs;
 
-	fnd::MemoryBlob mFileExtractBlock;
+	fnd::MemoryBlob mCache;
 
 	nx::PfsHeader mPfs;
 
RomfsProcess.cpp

@@ -121,8 +121,8 @@ void RomfsProcess::displayDir(const sDirectory& dir, size_t tab) const
 void RomfsProcess::displayHeader()
 {
 	printf("[RomFS]\n");
-	printf(" DirNum: %" PRId64 "\n", mDirNum);
-	printf(" FileNum: %" PRId64 "\n", mFileNum);
+	printf(" DirNum: %" PRId64 "\n", (uint64_t)mDirNum);
+	printf(" FileNum: %" PRId64 "\n", (uint64_t)mFileNum);
 	if (mMountName.empty() == false)
 		printf(" MountPoint: %s%s\n", mMountName.c_str(), mMountName.at(mMountName.length()-1) != '/' ? "/" : "");
 }
@@ -142,18 +142,9 @@ void RomfsProcess::extractDir(const std::string& path, const sDirectory& dir)
 	if (dir.name.empty() == false)
 		fnd::io::appendToPath(dir_path, dir.name);
 
-	//printf("dirpath=[%s]\n", dir_path.c_str());
-
 	// make directory
 	fnd::io::makeDirectory(dir_path);
 
-	// allocate memory for file extraction
-#ifdef NSTOOL_ALLOC_UNIQUE_SCRATCH
-	fnd::MemoryBlob scratch;
-	scratch.alloc(kFileExportBlockSize);
-#endif
-
-
 	// extract files
 	fnd::SimpleFile outFile;
 	for (size_t i = 0; i < dir.file_list.getSize(); i++)
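With this hunk the per-call scratch allocation is gone: extractDir() no longer allocates kFileExportBlockSize (16 MiB) of scratch on every directory visit, and the copy loop in the next hunk instead reuses the single mCache buffer that extractFs() allocates once (see the RomfsProcess::extractFs hunk further down).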
@@ -168,15 +159,10 @@ void RomfsProcess::extractDir(const std::string& path, const sDirectory& dir)
 
 		outFile.open(file_path, outFile.Create);
 		mReader->seek(dir.file_list[i].offset);
-		for (size_t j = 0; j < (dir.file_list[i].size / kFileExportBlockSize); j++)
+		for (size_t j = 0; j < ((dir.file_list[i].size / kCacheSize) + ((dir.file_list[i].size % kCacheSize) != 0)); j++)
 		{
-			mReader->read(mFileExtractBlock.getBytes(), kFileExportBlockSize);
-			outFile.write(mFileExtractBlock.getBytes(), kFileExportBlockSize);
-		}
-		if (dir.file_list[i].size % kFileExportBlockSize)
-		{
-			mReader->read(mFileExtractBlock.getBytes(), dir.file_list[i].size % kFileExportBlockSize);
-			outFile.write(mFileExtractBlock.getBytes(), dir.file_list[i].size % kFileExportBlockSize);
+			mReader->read(mCache.getBytes(), MIN(dir.file_list[i].size - (kCacheSize * j),kCacheSize));
+			outFile.write(mCache.getBytes(), MIN(dir.file_list[i].size - (kCacheSize * j),kCacheSize));
 		}
 		outFile.close();
 	}
@@ -191,7 +177,7 @@ void RomfsProcess::extractDir(const std::string& path, const sDirectory& dir)
 void RomfsProcess::extractFs()
 {
 	// allocate only when extractDir is invoked
-	mFileExtractBlock.alloc(kFileExportBlockSize);
+	mCache.alloc(kCacheSize);
 	extractDir(mExtractPath, mRootDir);
 }
 
RomfsProcess.h

@@ -106,8 +106,7 @@ public:
 	const sDirectory& getRootDir() const;
 private:
 	const std::string kModuleName = "RomfsProcess";
-	static const size_t kFileExportBlockSize = 0x1000000;
-	//static const size_t kFileExportBlockSize = 0x1000000;
+	static const size_t kCacheSize = 0x10000;
 
 	fnd::IFile* mReader;
 	CliOutputType mCliOutputType;
@@ -118,7 +117,7 @@ private:
 	std::string mMountName;
 	bool mListFs;
 
-	fnd::MemoryBlob mFileExtractBlock;
+	fnd::MemoryBlob mCache;
 
 	size_t mDirNum;
 	size_t mFileNum;