/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2017, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
    - LZ4 homepage : http://www.lz4.org
    - LZ4 source repository : https://github.com/lz4/lz4
*/
/*-************************************
*  Tuning parameters
**************************************/
/*
 * ACCELERATION_DEFAULT :
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
 */
#define ACCELERATION_DEFAULT 1

/*-************************************
*  Dependency
**************************************/
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"
/* see also "memory routines" below */
/*-************************************
*  Compiler Options
**************************************/
# ifndef LZ4_FORCE_INLINE
# ifdef _MSC_VER /* Visual Studio */
# define LZ4_FORCE_INLINE static __forceinline
# else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
# else
# define LZ4_FORCE_INLINE static inline
# endif
# else
# define LZ4_FORCE_INLINE static
# endif /* __STDC_VERSION__ */
# endif /* _MSC_VER */
# endif /* LZ4_FORCE_INLINE */
/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
 * Gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy,
 * together with a simple 8-byte copy loop as a fall-back path.
 * However, this optimization hurts the decompression speed by >30%,
 * because the execution does not go to the optimized loop
 * for typical compressible data, and all of the preamble checks
 * before going to the fall-back path become useless overhead.
 * This optimization happens only with the -O3 flag, and -O2 generates
 * a simple 8-byte copy loop.
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy
 * functions are annotated with __attribute__((optimize("O2"))),
 * and also LZ4_wildCopy is forcibly inlined, so that the O2 attribute
 * of LZ4_wildCopy does not affect the compression speed.
 */
# if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__)
# define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
# else
# define LZ4_FORCE_O2_GCC_PPC64LE
# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
# endif
/*-************************************
*  Memory routines
**************************************/
# include <mem/heap.h> /* malloc, calloc, free */
# define ALLOC(s) malloc(s)
# define ALLOC_AND_ZERO(s) zalloc(s)
# define FREEMEM free
# include <string.h> /* memset, memcpy */
# define MEM_INIT memset
/*-************************************
*  Basic Types
**************************************/
typedef   uint8_t BYTE;   /* used throughout this file; defined here as in upstream lz4.c */
typedef  uint16_t U16;
typedef  uint32_t U32;
typedef   int32_t S32;
typedef  uint64_t U64;
typedef uintptr_t uptrval;

typedef size_t reg_t;   /* 32-bits in x32 mode */
/*-************************************
*  Reading and writing into memory
**************************************/
static unsigned LZ4_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}

static U16 LZ4_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
    reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

static U16 LZ4_readLE16(const void* memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read16(memPtr);
    } else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)((U16)p[0] + (p[1]<<8));
    }
}

static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (LZ4_isLittleEndian()) {
        LZ4_write16(memPtr, value);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE) value;
        p[1] = (BYTE)(value>>8);
    }
}

/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_FORCE_O2_INLINE_GCC_PPC64LE
void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

    do { memcpy(d, s, 8); d += 8; s += 8; } while (d < e);
}
/*-************************************
*  Common Constants
**************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
static const int LZ4_minLength = (MFLIMIT+1);

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
/*-************************************
*  Error detection
**************************************/
#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */

#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
#  include <stdio.h>
static int g_debuglog_enable = 1;
#  define DEBUGLOG(l, ...) {                          \
        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
            fprintf(stderr, __FILE__ ": ");           \
            fprintf(stderr, __VA_ARGS__);             \
            fprintf(stderr, " \n");                   \
    }   }
#else
#  define DEBUGLOG(l, ...)   {}   /* disabled */
#endif
/*-************************************
*  Common functions
**************************************/
static unsigned LZ4_NbCommonBytes(reg_t val)
{
    if (LZ4_isLittleEndian()) {
        if (sizeof(val) == 8) {
            return (__builtin_ctzll((U64)val) >> 3);
        } else /* 32 bits */ {
            return (__builtin_ctz((U32)val) >> 3);
        }
    } else   /* Big Endian CPU */ {
        if (sizeof(val) == 8) {   /* 64-bits */
            return (__builtin_clzll((U64)val) >> 3);
        } else /* 32 bits */ {
            return (__builtin_clz((U32)val) >> 3);
        }
    }
}

#define STEPSIZE sizeof(reg_t)

LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    if (likely(pIn < pInLimit - (STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) {
            pIn += STEPSIZE; pMatch += STEPSIZE;
        } else {
            return LZ4_NbCommonBytes(diff);
    }   }

    while (likely(pIn < pInLimit - (STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn += STEPSIZE; pMatch += STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    if ((STEPSIZE==8) && (pIn < (pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn += 4; pMatch += 4; }
    if ((pIn < (pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn += 2; pMatch += 2; }
    if ((pIn < pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}
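/* Illustrative note (not part of upstream LZ4): LZ4_count() returns the length
 * of the common prefix of pIn and pMatch, never reading past pInLimit.
 * A minimal sketch of the intended semantics, with two hypothetical buffers:
 *
 *     static const BYTE a[] = "abcdefgh12345";
 *     static const BYTE b[] = "abcdefgh1XXXX";
 *     // LZ4_count(a, b, a + 13) would return 9,
 *     // the length of the shared prefix "abcdefgh1".
 */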
#ifndef LZ4_COMMONDEFS_ONLY
/*-************************************
*  Local Constants
**************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6;   /* Increase this value ==> compression runs slower on incompressible data */
/*-************************************
*  Local Structures and types
**************************************/
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;

/**
 * This enum distinguishes several different modes of accessing previous
 * content in the stream.
 *
 * - noDict        : There is no preceding content.
 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
 *                   being compressed are valid and refer to the preceding
 *                   content (of length ctx->dictSize), which is available
 *                   contiguously preceding in memory the content currently
 *                   being compressed.
 * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
 *                   else in memory, starting at ctx->dictionary with length
 *                   ctx->dictSize.
 * - usingDictCtx  : Like usingExtDict, but everything concerning the preceding
 *                   content is in a separate context, pointed to by
 *                   ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
 *                   entries in the current context that refer to positions
 *                   preceding the beginning of the current compression are
 *                   ignored. Instead, ctx->dictCtx->dictionary and
 *                   ctx->dictCtx->dictSize describe the location and size of
 *                   the preceding content, and matches are found by looking in
 *                   the ctx->dictCtx->hashTable.
 */
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;
/*-************************************
*  Local Utils
**************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
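/* Usage sketch (hypothetical caller, not part of this file): compressing with an
 * externally allocated state sized via LZ4_sizeofState(). ALLOC/FREEMEM are the
 * local macros defined above; error handling is omitted.
 *
 *     void* state = ALLOC(LZ4_sizeofState());
 *     int csize = LZ4_compress_fast_extState(state, src, dst, srcSize,
 *                                            LZ4_compressBound(srcSize), 1);
 *     // csize == 0 signals a compression error (e.g. dst too small)
 *     FREEMEM(state);
 */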
/*-******************************
*  Compression functions
********************************/
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    if (tableType == byU16)
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}

static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
    static const U64 prime5bytes = 889523592379ULL;
    static const U64 prime8bytes = 11400714785074694791ULL;
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
    if (LZ4_isLittleEndian())
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
    else
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
}

LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
    return LZ4_hash4(LZ4_read32(p), tableType);
}

static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
    switch (tableType)
    {
    default: /* fallthrough */
    case clearedTable: /* fallthrough */
    case byPtr: { /* illegal! */ return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)idx; return; }
    }
}

static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
{
    switch (tableType)
    {
    case clearedTable: { /* illegal! */ return; }
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
    }
}

LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

/* LZ4_getIndexOnHash() :
 * Index of match position registered in hash table.
 * hash position must be calculated by using base+index, or dictBase+index.
 * Assumption 1 : only valid if tableType == byU32 or byU16.
 * Assumption 2 : h is presumed valid (within limits of hash table)
 */
static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h]; }
    if (tableType == byU16) { const U16* const hashTable = (const U16*) tableBase; return hashTable[h]; }
    return 0;   /* forbidden case */
}

static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
    { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
}

LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 const h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
LZ4_FORCE_INLINE void LZ4_prepareTable(
        LZ4_stream_t_internal* const cctx,
        const int inputSize,
        const tableType_t tableType) {
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
     * out if it's safe to leave as is or whether it needs to be reset.
     */
    if (cctx->tableType != clearedTable) {
        if (cctx->tableType != tableType
          || (tableType == byU16 && cctx->currentOffset + inputSize >= 0xFFFFU)
          || (tableType == byU32 && cctx->currentOffset > 1 GB)
          || tableType == byPtr
          || inputSize >= 4 KB)
        {
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
            cctx->currentOffset = 0;
            cctx->tableType = clearedTable;
        } else {
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
        }
    }

    /* Adding a gap, so all previous entries are > MAX_DISTANCE back, is faster
     * than compressing without a gap. However, compressing with
     * currentOffset == 0 is faster still, so we preserve that case.
     */
    if (cctx->currentOffset != 0 && tableType == byU32) {
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
        cctx->currentOffset += 64 KB;
    }

    /* Finally, clear history */
    cctx->dictCtx = NULL;
    cctx->dictionary = NULL;
    cctx->dictSize = 0;
}
/** LZ4_compress_generic() :
    inlined, to ensure branches are decided at compilation time */
LZ4_FORCE_INLINE int LZ4_compress_generic(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 const int maxOutputSize,
                 const limitedOutput_directive outputLimited,
                 const tableType_t tableType,
                 const dict_directive dictDirective,
                 const dictIssue_directive dictIssue,
                 const U32 acceleration)
{
    const BYTE* ip = (const BYTE*) source;

    U32 const startIndex = cctx->currentOffset;
    const BYTE* base = (const BYTE*) source - startIndex;
    const BYTE* lowLimit;

    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
    const BYTE* const dictionary =
        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
    const U32 dictSize =
        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */

    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
    const BYTE* const dictEnd = dictionary + dictSize;
    const BYTE* anchor = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    /* the dictCtx currentOffset is indexed on the start of the dictionary,
     * while a dictionary in the current context precedes the currentOffset */
    const BYTE* dictBase = dictDirective == usingDictCtx ?
                            dictionary + dictSize - dictCtx->currentOffset :   /* is it possible that dictCtx->currentOffset != dictCtx->dictSize ? Yes if the dictionary context is not reset */
                            dictionary + dictSize - startIndex;

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 offset = 0;
    U32 forwardH;

    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
    if ((tableType == byU16) && (inputSize >= LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */

    /* Update context state */
    if (dictDirective == usingDictCtx) {
        /* Subsequent linked blocks can't use the dictionary. */
        /* Instead, they use the block we just compressed. */
        cctx->dictCtx = NULL;
        cctx->dictSize = (U32)inputSize;
    } else {
        cctx->dictSize += (U32)inputSize;
    }
    cctx->currentOffset += (U32)inputSize;
    cctx->tableType = tableType;

    if (inputSize < LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;

        /* Find a match */
        if (tableType == byPtr) {
            const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

            } while ( (match+MAX_DISTANCE < ip)
                   || (LZ4_read32(match) != LZ4_read32(ip)) );

        } else {   /* byU32, byU16 */

            const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                U32 const current = (U32)(forwardIp - base);
                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;

                if (dictDirective == usingDictCtx) {
                    if (matchIndex < startIndex) {
                        /* there was no match, try the dictionary */
                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                        match = dictBase + matchIndex;
                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else if (dictDirective == usingExtDict) {
                    if (matchIndex < startIndex) {
                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
                        match = dictBase + matchIndex;
                        lowLimit = dictionary;
                    } else {
                        match = base + matchIndex;
                        lowLimit = (const BYTE*)source;
                    }
                } else {   /* single continuous memory segment */
                    match = base + matchIndex;
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);

                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue;   /* match outside of valid area */
                if ((tableType != byU16) && (current - matchIndex > MAX_DISTANCE)) continue;   /* too far - note: works even if matchIndex overflows */

                if (LZ4_read32(match) == LZ4_read32(ip)) {
                    if (maybe_extMem) offset = current - matchIndex;
                    break;   /* match found */
                }

            } while(1);
        }

        /* Catch up */
        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1]))) { ip--; match--; }

        /* Encode Literals */
        {   unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) &&   /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;
            if (litLength >= RUN_MASK) {
                int len = (int)litLength - RUN_MASK;
                *token = (RUN_MASK<<ML_BITS);
                for (; len >= 255; len -= 255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op + litLength);
            op += litLength;
            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                        (int)(anchor - (const BYTE*)source), litLength, (int)(ip - (const BYTE*)source));
        }

_next_match:
        /* at this stage, the following variables must be correctly set :
         * - ip : at start of LZ operation
         * - match : at start of previous pattern occurence; can be within current prefix, or within extDict
         * - offset : if maybe_ext_memSegment==1 (constant)
         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
         */

        /* Encode Offset */
        if (maybe_extMem) {   /* static test */
            LZ4_writeLE16(op, (U16)offset); op += 2;
        } else {
            LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
        }

        /* Encode MatchLength */
        {   unsigned matchCode;

            if ( (dictDirective == usingExtDict || dictDirective == usingDictCtx)
              && (lowLimit == dictionary) /* match within extDict */ ) {
                const BYTE* limit = ip + (dictEnd - match);
                if (limit > matchlimit) limit = matchlimit;
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += MINMATCH + matchCode;
                if (ip == limit) {
                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
                DEBUGLOG(6, "with matchLength=%u starting in extDict", matchCode+MINMATCH);
            } else {
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += MINMATCH + matchCode;
                DEBUGLOG(6, "with matchLength=%u", matchCode+MINMATCH);
            }

            if (outputLimited &&   /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)))
                return 0;
            if (matchCode >= ML_MASK) {
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4*255) {
                    op += 4;
                    LZ4_write32(op, 0xFFFFFFFF);
                    matchCode -= 4*255;
                }
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            } else
                *token += (BYTE)(matchCode);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip >= mflimitPlusOne) break;

        /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

        /* Test next position */
        if (tableType == byPtr) {

            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
            if ( (match+MAX_DISTANCE >= ip)
              && (LZ4_read32(match) == LZ4_read32(ip)) )
            { token = op++; *token = 0; goto _next_match; }

        } else {   /* byU32, byU16 */

            U32 const h = LZ4_hashPosition(ip, tableType);
            U32 const current = (U32)(ip - base);
            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
            if (dictDirective == usingDictCtx) {
                if (matchIndex < startIndex) {
                    /* there was no match, try the dictionary */
                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                    matchIndex += dictDelta;
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
                }
            } else if (dictDirective == usingExtDict) {
                if (matchIndex < startIndex) {
                    match = dictBase + matchIndex;
                    lowLimit = dictionary;   /* required for match length counter */
                } else {
                    match = base + matchIndex;
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
                }
            } else {   /* single memory segment */
                match = base + matchIndex;
            }
            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
            if ( ((dictIssue == dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
              && ((tableType == byU16) ? 1 : (current - matchIndex <= MAX_DISTANCE))
              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
                token = op++;
                *token = 0;
                if (maybe_extMem) offset = current - matchIndex;
                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
                            (int)(anchor - (const BYTE*)source), 0, (int)(ip - (const BYTE*)source));
                goto _next_match;
            }
        }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t const lastRun = (size_t)(iend - anchor);
        if ((outputLimited) &&   /* Check output buffer overflow */
            ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
            return 0;
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for (; accumulator >= 255; accumulator -= 255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRun<<ML_BITS);
        }
        memcpy(op, anchor, lastRun);
        op += lastRun;
    }

    return (int)(((char*)op) - dest);
}
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
    LZ4_resetStream((LZ4_stream_t*)state);
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (inputSize < LZ4_64Klimit) {
            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32;
            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}
/**
 * LZ4_compress_fast_extState_fastReset() :
 * A variant of LZ4_compress_fast_extState().
 *
 * Using this variant avoids an expensive initialization step. It is only safe
 * to call if the state buffer is known to be correctly initialized already
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
 * "correctly initialized").
 */
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    if (dstCapacity >= LZ4_compressBound(srcSize)) {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
        }
    } else {
        if (srcSize < LZ4_64Klimit) {
            const tableType_t tableType = byU16;
            LZ4_prepareTable(ctx, srcSize, tableType);
            if (ctx->currentOffset) {
                return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
            } else {
                return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
            }
        } else {
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
            LZ4_prepareTable(ctx, srcSize, tableType);
            return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
        }
    }
}
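/* Usage sketch (hypothetical caller): reusing one state across many independent
 * blocks. The state must have been fully initialized once (e.g. by
 * LZ4_resetStream()); after that, this variant only pays the cheap bookkeeping
 * done by LZ4_prepareTable(). Loop bookkeeping is the caller's.
 *
 *     LZ4_stream_t st;
 *     LZ4_resetStream(&st);                         // full init, once
 *     for (i = 0; i < nbBlocks; i++) {
 *         int csize = LZ4_compress_fast_extState_fastReset(
 *                         &st, in[i], out[i], inSize[i], outCap[i], 1);
 *         // csize == 0 => error (e.g. out[i] too small)
 *     }
 */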
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    int result;
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_stream_t* const ctxPtr = ctx;
    result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
    FREEMEM(ctx);
    return result;
}

int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
}
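/* Usage sketch (hypothetical caller): one-shot round trip with the default entry
 * point. 'src', 'back' and their sizes are the caller's; error handling omitted.
 *
 *     int bound = LZ4_compressBound(srcSize);
 *     char* dst = ALLOC(bound);
 *     int csize = LZ4_compress_default(src, dst, srcSize, bound);
 *     // csize > 0 on success
 *     int dsize = LZ4_decompress_safe(dst, back, csize, srcSize);
 *     // dsize should equal srcSize; a negative value signals malformed input
 *     FREEMEM(dst);
 */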
/* hidden debug function */
/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    int result;
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_resetStream(ctx);

    if (inputSize < LZ4_64Klimit)
        result = LZ4_compress_generic(&ctx->internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
    else
        result = LZ4_compress_generic(&ctx->internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);

    FREEMEM(ctx);
    return result;
}
/*-******************************
*  *_destSize() variant
********************************/
static int LZ4_compress_destSize_generic(
                       LZ4_stream_t_internal* const ctx,
                 const char* const src,
                       char* const dst,
                       int*  const srcSizePtr,
                 const int targetDstSize,
                 const tableType_t tableType)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* base = (const BYTE*) src;
    const BYTE* lowLimit = (const BYTE*) src;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + targetDstSize;
    BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
    BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
    BYTE* const oMaxSeq = oMaxLit - 1 /* token */;

    U32 forwardH;

    /* Init conditions */
    if (targetDstSize < 1) return 0;                                      /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;             /* Unsupported input size, too large (or negative) */
    if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit)) return 0;  /* Size too large (not within 64K limit) */
    if (*srcSizePtr < LZ4_minLength) goto _last_literals;                 /* Input too small, no compression (all literals) */

    /* First Byte */
    *srcSizePtr = 0;
    LZ4_putPosition(ip, ctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;

        /* Find a match */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = 1 << LZ4_skipTrigger;

            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);

            } while ( ((tableType == byU16) ? 0 : (match + MAX_DISTANCE < ip))
                   || (LZ4_read32(match) != LZ4_read32(ip)) );
        }

        /* Catch up */
        while ((ip > anchor) && (match > lowLimit) && (unlikely(ip[-1] == match[-1]))) { ip--; match--; }

        /* Encode Literal length */
        {   unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if (op + ((litLength+240)/255) + litLength > oMaxLit) {
                /* Not enough space for a last match */
                op--;
                goto _last_literals;
            }
            if (litLength >= RUN_MASK) {
                unsigned len = litLength - RUN_MASK;
                *token = (RUN_MASK<<ML_BITS);
                for (; len >= 255; len -= 255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op + litLength);
            op += litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip - match)); op += 2;

        /* Encode MatchLength */
        {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);

            if (op + ((matchLength+240)/255) > oMaxMatch) {
                /* Match description too long : reduce it */
                matchLength = (15-1) + (oMaxMatch - op) * 255;
            }
            ip += MINMATCH + matchLength;

            if (matchLength >= ML_MASK) {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                while (matchLength >= 255) { matchLength -= 255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of block */
        if (ip > mflimit) break;
        if (op > oMaxSeq) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);

        /* Test next position */
        match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
        LZ4_putPosition(ip, ctx->hashTable, tableType, base);
        if ( (match+MAX_DISTANCE >= ip)
          && (LZ4_read32(match) == LZ4_read32(ip)) )
        { token = op++; *token = 0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);
        if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize  = (oend - op) - 1;
            lastRunSize -= (lastRunSize+240)/255;
        }
        ip = anchor + lastRunSize;

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for (; accumulator >= 255; accumulator -= 255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize<<ML_BITS);
        }
        memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int)(((const char*)ip) - src);
    return (int)(((char*)op) - dst);
}
static int LZ4_compress_destSize_extState(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_resetStream(state);

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {   /* compression success is guaranteed */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
    } else {
        if (*srcSizePtr < LZ4_64Klimit) {
            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
        } else {
            tableType_t const tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
            return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, tableType);
    }   }
}
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_stream_t* ctxBody = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_stream_t* ctx = ctxBody;

    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);

    FREEMEM(ctxBody);
    return result;
}
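/* Usage sketch (hypothetical caller): fill a fixed-size destination buffer and
 * learn how much of the source was actually consumed.
 *
 *     int consumed = srcSize;
 *     int produced = LZ4_compress_destSize(src, dst, &consumed, dstCapacity);
 *     // 'produced' bytes were written into dst, encoding the first 'consumed'
 *     // bytes of src; any remaining srcSize - consumed bytes need another call
 *     // (or another destination buffer).
 */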
/*-******************************
*  Streaming functions
********************************/

LZ4_stream_t* LZ4_createStream(void)
{
    LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));   /* A compilation error here means LZ4_STREAMSIZE is not large enough */
    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
    if (lz4s == NULL) return NULL;
    LZ4_resetStream(lz4s);
    return lz4s;
}

void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}

void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}

int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
    FREEMEM(LZ4_stream);
    return (0);
}
#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const tableType_t tableType = byU32;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);

    LZ4_prepareTable(dict, 0, tableType);

    /* We always increment the offset by 64 KB, since, if the dict is longer,
     * we truncate it to the last 64k, and if it's shorter, we still want to
     * advance by a whole window length so we can provide the guarantee that
     * there are only valid offsets in the window, which allows an optimization
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
     * dictionary isn't a full 64k. */

    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    base = dictEnd - 64 KB - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += 64 KB;
    dict->tableType = tableType;

    if (dictSize < (int)HASH_UNIT) {
        return 0;
    }

    while (p <= dictEnd - HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, tableType, base);
        p += 3;
    }

    return dict->dictSize;
}
void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dictionary_stream) {
    if (dictionary_stream != NULL) {
        /* If the current offset is zero, we will never look in the
         * external dictionary context, since there is no value a table
         * entry can take that indicates a miss. In that case, we need
         * to bump the offset to something non-zero.
         */
        if (working_stream->internal_donotuse.currentOffset == 0) {
            working_stream->internal_donotuse.currentOffset = 64 KB;
        }
        working_stream->internal_donotuse.dictCtx = &(dictionary_stream->internal_donotuse);
    } else {
        working_stream->internal_donotuse.dictCtx = NULL;
    }
}
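/* Usage sketch (hypothetical caller): compressing several independent blocks
 * against one preloaded dictionary, exercising the dictCtx path described in
 * the dict_directive comment above. Buffer names are the caller's.
 *
 *     LZ4_stream_t dictStream, work;
 *     LZ4_resetStream(&dictStream);
 *     LZ4_resetStream(&work);                        // full init, once
 *     LZ4_loadDict(&dictStream, dictBuf, dictLen);   // done once
 *     for (each independent block) {
 *         LZ4_resetStream_fast(&work);
 *         LZ4_attach_dictionary(&work, &dictStream);
 *         LZ4_compress_fast_continue(&work, in, out, inSize, outCap, 1);
 *     }
 */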
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
{
    if (LZ4_dict->currentOffset + nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        DEBUGLOG(4, "LZ4_renormDictT");
        for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i] = 0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    const tableType_t tableType = byU32;
    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    /* Check overlapping input/dictionary space */
    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source) {
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
        else
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
    }

    /* external dictionary mode */
    {   int result;
        if (streamPtr->dictCtx) {
            /* We depend here on the fact that dictCtx'es (produced by
             * LZ4_loadDict) guarantee that their tables contain no references
             * to offsets between dictCtx->currentOffset - 64 KB and
             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
             * to use noDictIssue even when the dict isn't a full 64 KB.
             */
            if (inputSize > 4 KB) {
                /* For compressing large blobs, it is faster to pay the setup
                 * cost to copy the dictionary's tables into the active context,
                 * so that the compression loop is only looking into one table.
                 */
                memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
            }
        } else {
            if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
            } else {
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
            }
        }
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        return result;
    }
}
/* Hidden debug function, to force-test external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
    int result;

    LZ4_renormDictT(streamPtr, srcSize);

    if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
    } else {
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
    }

    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)srcSize;

    return result;
}
/*! LZ4_saveDict() :
 *  If previously compressed data block is not guaranteed to remain available at its memory location,
 *  save it into a safer place (char* safeBuffer).
 *  Note : you don't need to call LZ4_loadDict() afterwards,
 *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
 *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
 */
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
    const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

    memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}
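/* Usage sketch (hypothetical caller): when the buffer holding the previously
 * compressed block is about to be overwritten, preserve the last window so
 * LZ4_compress_fast_continue() can keep matching against it.
 *
 *     char window[64 KB];                    // caller-owned scratch buffer
 *     // ... LZ4_compress_fast_continue() on block N from a reusable buffer ...
 *     LZ4_saveDict(&stream, window, sizeof(window));
 *     // the reusable buffer may now be reused for block N+1
 */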
/*-*****************************
*  Decompression functions
*******************************/
/*! LZ4_decompress_generic() :
 *  This generic decompression function covers all use cases.
 *  It shall be instantiated several times, using different sets of directives.
 *  Note that it is important for performance that this function really get inlined,
 *  in order to remove useless branches during compilation optimization.
 */
LZ4_FORCE_O2_GCC_PPC64LE
LZ4_FORCE_INLINE int LZ4_decompress_generic(
                 const char* const src,
                 char* const dst,
                 int srcSize,
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is `dstCapacity` */

                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
                 int partialDecoding,    /* full, partial */
                 int targetOutputSize,   /* only used if partialDecoding==partial */
                 int dict,               /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,   /* always <= dst, == dst when no prefix */
                 const BYTE* const dictStart,   /* only if dict==usingExtDict */
                 const size_t dictSize          /* note : = 0 if noDict */
                 )
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;

    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
    const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
    const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};

    const int safeDecode = (endOnInput == endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));

    /* Special cases */
    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                       /* targetOutputSize too high => just decode everything */
    if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1;   /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0 ? 1 : -1);

    /* Main Loop : decode sequences */
    while (1) {
        size_t length;
        const BYTE* match;
        size_t offset;

        unsigned const token = *ip++;

        /* shortcut for common case :
         * in most circumstances, we expect to decode small matches (<= 18 bytes) separated by few literals (<= 14 bytes).
         * this shortcut was tested on x86 and x64, where it improves decoding speed.
         * it has not yet been benchmarked on ARM, Power, mips, etc. */
        if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend)
          & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend))
          & ((token < (15<<ML_BITS)) & ((token & ML_MASK) != 15)) ) {
            size_t const ll = token >> ML_BITS;
            size_t const off = LZ4_readLE16(ip+ll);
            const BYTE* const matchPtr = op + ll - off;   /* pointer underflow risk ? */
            if ((off >= 8) /* do not deal with overlapping matches */ & (matchPtr >= lowPrefix)) {
                size_t const ml = (token & ML_MASK) + MINMATCH;
                memcpy(op, ip, 16); op += ll; ip += ll + 2 /*offset*/;
                memcpy(op + 0, matchPtr + 0, 8);
                memcpy(op + 8, matchPtr + 8, 8);
                memcpy(op +16, matchPtr +16, 2);
                op += ml;
                continue;
            }
        }

        /* decode literal length */
        if ((length = (token>>ML_BITS)) == RUN_MASK) {
            unsigned s;
            do {
                s = *ip++;
                length += s;
            } while (likely(endOnInput ? ip < iend-RUN_MASK : 1) & (s == 255));
            if ((safeDecode) && unlikely((uptrval)(op)+length < (uptrval)(op))) goto _output_error;   /* overflow detection */
            if ((safeDecode) && unlikely((uptrval)(ip)+length < (uptrval)(ip))) goto _output_error;   /* overflow detection */
        }

        /* copy literals */
        cpy = op + length;
        if ( ((endOnInput) && ((cpy > (partialDecoding ? oexit : oend-MFLIMIT)) || (ip+length > iend-(2+1+LASTLITERALS))))
          || ((!endOnInput) && (cpy > oend-WILDCOPYLENGTH)) )
        {
            if (partialDecoding) {
                if (cpy > oend) goto _output_error;                            /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip+length > iend)) goto _output_error;    /* Error : read attempt beyond end of input buffer */
            } else {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;        /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;   /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);
        ip += length; op = cpy;

        /* get offset */
        offset = LZ4_readLE16(ip); ip += 2;
        match = op - offset;
        if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error;   /* Error : offset outside buffers */
        LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */

        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK) {
            unsigned s;
            do {
                s = *ip++;
                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
                length += s;
            } while (s == 255);
            if ((safeDecode) && unlikely((uptrval)(op)+length < (uptrval)op)) goto _output_error;   /* overflow detection */
        }
        length += MINMATCH;

        /* check external dictionary */
        if ((dict == usingExtDict) && (match < lowPrefix)) {
            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */

            if (length <= (size_t)(lowPrefix - match)) {
                /* match can be copied as a single segment from external dictionary */
                memmove(op, dictEnd - (lowPrefix - match), length);
                op += length;
            } else {
                /* match encompass external dictionary and current block */
                size_t const copySize = (size_t)(lowPrefix - match);
                size_t const restSize = length - copySize;
                memcpy(op, dictEnd - copySize, copySize);
                op += copySize;
                if (restSize > (size_t)(op - lowPrefix)) {   /* overlap copy */
                    BYTE* const endOfMatch = op + restSize;
                    const BYTE* copyFrom = lowPrefix;
                    while (op < endOfMatch) *op++ = *copyFrom++;
                } else {
                    memcpy(op, lowPrefix, restSize);
                    op += restSize;
            }   }
            continue;
        }

        /* copy match within block */
        cpy = op + length;
        if (unlikely(offset < 8)) {
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += inc32table[offset];
            memcpy(op+4, match, 4);
            match -= dec64table[offset];
        } else { memcpy(op, match, 8); match += 8; }
        op += 8;

        if (unlikely(cpy > oend-12)) {
            BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
            if (cpy > oend-LASTLITERALS) goto _output_error;   /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
            if (op < oCopyLimit) {
                LZ4_wildCopy(op, match, oCopyLimit);
                match += oCopyLimit - op;
                op = oCopyLimit;
            }
            while (op < cpy) *op++ = *match++;
        } else {
            memcpy(op, match, 8);
            if (length > 16) LZ4_wildCopy(op+8, match+8, cpy);
        }
        op = cpy;   /* correction */
    }

    /* end of decoding */
    if (endOnInput)
        return (int)(((char*)op) - dst);         /* Nb of output bytes decoded */
    else
        return (int)(((const char*)ip) - src);   /* Nb of input bytes read */

    /* Overflow error detected */
_output_error:
    return (int)(-(((const char*)ip) - src)) - 1;
}
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
}
/*===== streaming decompression functions =====*/
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
    LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
    return lz4s;
}

int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    if (!LZ4_stream) return 0;   /* support free on NULL */
    FREEMEM(LZ4_stream);
    return 0;
}

/*!
 * LZ4_setStreamDecode() :
 * Use this function to instruct where to find the dictionary.
 * This function is not necessary if previous data is still available where it was decoded.
 * Loading a size of 0 is allowed (same effect as no dictionary).
 * Return : 1 if OK, 0 if error
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    lz4sd->prefixSize = (size_t) dictSize;
    lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
    lz4sd->externalDict = NULL;
    lz4sd->extDictSize = 0;
    return 1;
}

/*
*_continue() :
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
    Previously decoded blocks must still be available at the memory position where they were decoded.
    If that is not possible, save the relevant part of the decoded data into a safe buffer,
    and indicate where it now stands using LZ4_setStreamDecode().
*/
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += result;
        lz4sd->prefixEnd  += result;
    } else {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}
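
/* Illustrative sketch, kept out of the build by the #if 0 guard : decoding two
 * dependent blocks with LZ4_decompress_safe_continue(). The function name and
 * argument layout are assumptions made for the example; the point shown is that
 * previously decoded data stays readable at its original position, so no
 * LZ4_setStreamDecode() call is needed between the two blocks. */
#if 0
static int example_streaming_decode(const char* firstBlock, int firstSize,
                                    const char* secondBlock, int secondSize,
                                    char* dst, int dstCapacity)
{
    LZ4_streamDecode_t* const lz4sd = LZ4_createStreamDecode();
    int total = 0;
    if (lz4sd == NULL) return -1;
    /* no explicit dictionary : decoding starts from scratch at dst */
    LZ4_setStreamDecode(lz4sd, NULL, 0);
    {   int const d1 = LZ4_decompress_safe_continue(lz4sd, firstBlock, dst, firstSize, dstCapacity);
        if (d1 < 0) { LZ4_freeStreamDecode(lz4sd); return -1; }
        total += d1;
        /* the second block may reference the first one, which is still present at dst */
        {   int const d2 = LZ4_decompress_safe_continue(lz4sd, secondBlock, dst + d1, secondSize, dstCapacity - d1);
            if (d2 < 0) { LZ4_freeStreamDecode(lz4sd); return -1; }
            total += d2;
    }   }
    LZ4_freeStreamDecode(lz4sd);
    return total;   /* total number of bytes decoded into dst */
}
#endif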

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += originalSize;
        lz4sd->prefixEnd  += originalSize;
    } else {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }

    return result;
}

/*
Advanced decoding functions :
*_usingDict() :
    These decoding functions work the same as the "_continue" ones,
    except that the dictionary must be provided explicitly as a parameter.
*/
LZ4_FORCE_O2_GCC_PPC64LE
LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
{
    if (dictSize==0)
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
    if (dictStart+dictSize == dest) {
        if (dictSize >= (int)(64 KB - 1))
            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
    }
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
}
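
/* Illustrative sketch, kept out of the build by the #if 0 guard : one-shot
 * decoding against an explicit dictionary with LZ4_decompress_safe_usingDict().
 * The wrapper name and parameters are assumptions for the example; the
 * dictionary must be the same data that was used at compression time. */
#if 0
static int example_decode_with_dict(const char* compressed, int compressedSize,
                                    char* dst, int dstCapacity,
                                    const char* dictBuffer, int dictSize)
{
    /* returns the decoded size, or a negative value on malformed input */
    return LZ4_decompress_safe_usingDict(compressed, dst, compressedSize, dstCapacity,
                                         dictBuffer, dictSize);
}
#endif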

/* debug function */
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}

/*=*************************************************
*  Obsolete Functions
***************************************************/

/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }

/*
These function names are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }

/* Obsolete Streaming functions */

int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }

int LZ4_resetStreamState(void* state, char* inputBuffer)
{
    (void)inputBuffer;
    LZ4_resetStream((LZ4_stream_t*)state);
    return 0;
}

void* LZ4_create (char* inputBuffer)
{
    (void)inputBuffer;
    return LZ4_createStream();
}

char* LZ4_slideInputBuffer (void* state)
{
    /* avoid const char * -> char * conversion warning */
    return (char*)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
}

/* Obsolete streaming decompression functions */

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

#endif   /* LZ4_COMMONDEFS_ONLY */