2013-12-30 17:16:52 +00:00
/*
2015-05-02 14:44:43 +00:00
LZ4 HC - High Compression Mode of LZ4
2017-03-16 22:10:38 +00:00
Copyright ( C ) 2011 - 2017 , Yann Collet .
2015-05-02 14:44:43 +00:00
BSD 2 - Clause License ( http : //www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms , with or without
modification , are permitted provided that the following conditions are
met :
* Redistributions of source code must retain the above copyright
notice , this list of conditions and the following disclaimer .
* Redistributions in binary form must reproduce the above
copyright notice , this list of conditions and the following disclaimer
in the documentation and / or other materials provided with the
distribution .
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
" AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL ,
SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
You can contact the author at :
2016-11-03 14:12:57 +00:00
- LZ4 source repository : https : //github.com/lz4/lz4
2015-05-02 14:44:43 +00:00
- LZ4 public forum : https : //groups.google.com/forum/#!forum/lz4c
2013-12-30 17:16:52 +00:00
*/
2016-11-12 23:50:29 +00:00
/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
2014-02-04 14:11:10 +00:00
2015-10-21 14:00:48 +00:00
/* *************************************
2015-05-02 14:44:43 +00:00
* Tuning Parameter
2015-10-21 14:00:48 +00:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2014-02-04 14:11:10 +00:00
2017-03-16 09:16:24 +00:00
/*! HEAPMODE :
* Select how default compression function will allocate workplace memory ,
* in stack ( 0 : fastest ) , or in heap ( 1 : requires malloc ( ) ) .
* Since workplace is rather large , heap mode is recommended .
2015-10-21 14:00:48 +00:00
*/
2016-11-12 23:50:29 +00:00
# ifndef LZ4HC_HEAPMODE
# define LZ4HC_HEAPMODE 1
# endif
2014-02-04 14:11:10 +00:00
2015-10-21 14:00:48 +00:00
2017-03-16 09:16:24 +00:00
/*=== Dependency ===*/
2017-11-03 18:28:28 +00:00
# define LZ4_HC_STATIC_LINKING_ONLY
2014-11-29 16:12:26 +00:00
# include "lz4hc.h"
2013-12-30 17:16:52 +00:00
2017-03-16 09:16:24 +00:00
/*=== Common LZ4 definitions ===*/
2014-11-29 19:19:39 +00:00
# if defined(__GNUC__)
# pragma GCC diagnostic ignored "-Wunused-function"
2013-12-30 17:16:52 +00:00
# endif
2014-11-29 19:19:39 +00:00
# if defined (__clang__)
# pragma clang diagnostic ignored "-Wunused-function"
2013-12-30 17:16:52 +00:00
# endif
2014-11-29 19:19:39 +00:00
# define LZ4_COMMONDEFS_ONLY
2017-03-16 22:10:38 +00:00
# include "lz4.c" /* LZ4_count, constants, mem */
2013-12-30 17:16:52 +00:00
2017-03-16 09:16:24 +00:00
/*=== Constants ===*/
2013-12-30 17:16:52 +00:00
# define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
2018-02-25 08:32:09 +00:00
# define LZ4_OPT_NUM (1<<12)
2013-12-30 17:16:52 +00:00
2017-03-16 09:16:24 +00:00
/*=== Macros ===*/
2017-08-10 07:48:19 +00:00
# define MIN(a,b) ( (a) < (b) ? (a) : (b) )
# define MAX(a,b) ( (a) > (b) ? (a) : (b) )
2017-05-02 19:01:13 +00:00
# define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
# define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */
# define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
2013-12-30 17:16:52 +00:00
2014-11-29 19:19:39 +00:00
/* Hash the 4 bytes at `ptr` into a LZ4HC_HASH_LOG-bit table index. */
static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
2013-12-30 17:16:52 +00:00
2018-04-18 17:55:58 +00:00
/*=== Enums ===*/
/* dictCtx_directive : selects whether match search also probes an external
 * dictionary context (hc4->dictCtx) after scanning the current prefix. */
typedef enum { noDictCtx , usingDictCtx } dictCtx_directive ;
2013-12-30 17:16:52 +00:00
2014-11-29 19:19:39 +00:00
/**************************************
2015-05-02 14:44:43 +00:00
* HC Compression
2014-11-29 19:19:39 +00:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2018-03-28 16:25:05 +00:00
/* LZ4HC_clearTables() :
 * Reset the match-search state of an HC context :
 * hash table zeroed, chain table filled with 0xFF (max distance). */
static void LZ4HC_clearTables(LZ4HC_CCtx_internal* hc4)
{
    DEBUGLOG(4, " LZ4HC_clearTables(%p) ", hc4);
    MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
    MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
}
2018-04-04 19:20:47 +00:00
/* LZ4HC_init() :
 * Prepare the HC context to compress a new segment starting at `start`.
 * Keeps prior indexing when possible; otherwise resets the hash table.
 * All index fields are rebased so that index 0 sits MAX_DISTANCE (+history)
 * before `start`, which keeps match indices strictly positive. */
static void LZ4HC_init(LZ4HC_CCtx_internal* hc4, const BYTE* start)
{
    uptrval startingOffset = hc4->end - hc4->base;   /* size of already-indexed history */
    DEBUGLOG(4, " LZ4HC_init(%p, %p) ", hc4, start);
    /* Too much history (>1 GB) or rebasing would underflow the address space :
     * drop the hash table and start indexing from scratch. */
    if (startingOffset > 1 GB || startingOffset > (uptrval)start) {
        MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
        startingOffset = 0;
    }
    startingOffset += MAX_DISTANCE;   /* guarantee no valid index reaches back before base */
    hc4->nextToUpdate = (U32)startingOffset;
    hc4->base      = start - startingOffset;
    hc4->end       = start;
    hc4->dictBase  = start - startingOffset;
    hc4->dictLimit = (U32)startingOffset;
    hc4->lowLimit  = (U32)startingOffset;
}
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Update chains up to ip (excluded) */
2017-08-24 14:14:20 +00:00
LZ4_FORCE_INLINE void LZ4HC_Insert ( LZ4HC_CCtx_internal * hc4 , const BYTE * ip )
2013-12-30 17:16:52 +00:00
{
2016-09-03 02:06:01 +00:00
U16 * const chainTable = hc4 - > chainTable ;
U32 * const hashTable = hc4 - > hashTable ;
2014-10-18 10:18:14 +00:00
const BYTE * const base = hc4 - > base ;
2016-09-03 02:06:01 +00:00
U32 const target = ( U32 ) ( ip - base ) ;
2014-10-18 10:18:14 +00:00
U32 idx = hc4 - > nextToUpdate ;
2013-12-30 17:16:52 +00:00
2016-09-03 02:06:01 +00:00
while ( idx < target ) {
U32 const h = LZ4HC_hashPtr ( base + idx ) ;
size_t delta = idx - hashTable [ h ] ;
2013-12-30 17:16:52 +00:00
if ( delta > MAX_DISTANCE ) delta = MAX_DISTANCE ;
2017-05-02 19:01:13 +00:00
DELTANEXTU16 ( chainTable , idx ) = ( U16 ) delta ;
2016-09-03 02:06:01 +00:00
hashTable [ h ] = idx ;
2014-10-18 10:18:14 +00:00
idx + + ;
2013-12-30 17:16:52 +00:00
}
2014-08-01 18:10:21 +00:00
2014-10-18 10:18:14 +00:00
hc4 - > nextToUpdate = target ;
2013-12-30 17:16:52 +00:00
}
2017-10-09 06:55:42 +00:00
/** LZ4HC_countBack() :
* @ return : negative value , nb of common bytes before ip / match */
2017-11-07 19:22:57 +00:00
LZ4_FORCE_INLINE
int LZ4HC_countBack ( const BYTE * const ip , const BYTE * const match ,
const BYTE * const iMin , const BYTE * const mMin )
2017-10-09 06:55:42 +00:00
{
2018-02-12 05:03:39 +00:00
int back = 0 ;
int const min = ( int ) MAX ( iMin - ip , mMin - match ) ;
assert ( ip > = iMin ) ; assert ( ( size_t ) ( ip - iMin ) < ( 1U < < 31 ) ) ;
assert ( match > = mMin ) ; assert ( ( size_t ) ( match - mMin ) < ( 1U < < 31 ) ) ;
while ( ( back > min )
& & ( ip [ back - 1 ] = = match [ back - 1 ] ) )
2017-10-09 06:55:42 +00:00
back - - ;
return back ;
}
2017-11-07 18:53:29 +00:00
/* LZ4HC_countPattern() :
* pattern32 must be a sample of repetitive pattern of length 1 , 2 or 4 ( but not 3 ! ) */
2018-02-20 23:25:45 +00:00
static unsigned
LZ4HC_countPattern ( const BYTE * ip , const BYTE * const iEnd , U32 const pattern32 )
2017-10-09 06:55:42 +00:00
{
const BYTE * const iStart = ip ;
2017-11-07 18:53:29 +00:00
reg_t const pattern = ( sizeof ( pattern ) = = 8 ) ? ( reg_t ) pattern32 + ( ( ( reg_t ) pattern32 ) < < 32 ) : pattern32 ;
2017-10-09 06:55:42 +00:00
2017-11-07 18:53:29 +00:00
while ( likely ( ip < iEnd - ( sizeof ( pattern ) - 1 ) ) ) {
2017-10-09 06:55:42 +00:00
reg_t const diff = LZ4_read_ARCH ( ip ) ^ pattern ;
if ( ! diff ) { ip + = sizeof ( pattern ) ; continue ; }
ip + = LZ4_NbCommonBytes ( diff ) ;
return ( unsigned ) ( ip - iStart ) ;
}
2017-11-07 18:53:29 +00:00
if ( LZ4_isLittleEndian ( ) ) {
reg_t patternByte = pattern ;
while ( ( ip < iEnd ) & & ( * ip = = ( BYTE ) patternByte ) ) {
ip + + ; patternByte > > = 8 ;
}
} else { /* big endian */
U32 bitOffset = ( sizeof ( pattern ) * 8 ) - 8 ;
while ( ip < iEnd ) {
BYTE const byte = ( BYTE ) ( pattern > > bitOffset ) ;
if ( * ip ! = byte ) break ;
ip + + ; bitOffset - = 8 ;
}
}
2017-10-09 06:55:42 +00:00
return ( unsigned ) ( ip - iStart ) ;
}
2017-11-07 19:05:48 +00:00
/* LZ4HC_reverseCountPattern() :
* pattern must be a sample of repetitive pattern of length 1 , 2 or 4 ( but not 3 ! )
* read using natural platform endianess */
2018-02-20 23:25:45 +00:00
static unsigned
LZ4HC_reverseCountPattern ( const BYTE * ip , const BYTE * const iLow , U32 pattern )
2017-10-09 06:55:42 +00:00
{
const BYTE * const iStart = ip ;
2017-11-07 19:05:48 +00:00
while ( likely ( ip > = iLow + 4 ) ) {
2017-10-09 06:55:42 +00:00
if ( LZ4_read32 ( ip - 4 ) ! = pattern ) break ;
ip - = 4 ;
}
2017-11-08 01:58:59 +00:00
{ const BYTE * bytePtr = ( const BYTE * ) ( & pattern ) + 3 ; /* works for any endianess */
while ( likely ( ip > iLow ) ) {
if ( ip [ - 1 ] ! = * bytePtr ) break ;
ip - - ; bytePtr - - ;
} }
2017-10-09 06:55:42 +00:00
return ( unsigned ) ( iStart - ip ) ;
}
/* repeat_state_e : lazily-computed knowledge about whether the current
 * position sits on a repetitive 1/2/4-byte pattern (see patternAnalysis). */
typedef enum { rep_untested , rep_not , rep_confirmed } repeat_state_e ;
2018-02-20 23:25:45 +00:00
LZ4_FORCE_INLINE int
LZ4HC_InsertAndGetWiderMatch (
2016-11-12 15:29:54 +00:00
LZ4HC_CCtx_internal * hc4 ,
2015-03-30 14:57:26 +00:00
const BYTE * const ip ,
const BYTE * const iLowLimit ,
const BYTE * const iHighLimit ,
2014-10-26 10:22:15 +00:00
int longest ,
const BYTE * * matchpos ,
const BYTE * * startpos ,
2017-12-22 07:07:25 +00:00
const int maxNbAttempts ,
2018-04-18 17:55:58 +00:00
const int patternAnalysis ,
const dictCtx_directive dict )
2013-12-30 17:16:52 +00:00
{
2014-10-18 10:18:14 +00:00
U16 * const chainTable = hc4 - > chainTable ;
U32 * const HashTable = hc4 - > hashTable ;
2018-03-21 20:54:36 +00:00
const LZ4HC_CCtx_internal * const dictCtx = hc4 - > dictCtx ;
2014-10-18 10:18:14 +00:00
const BYTE * const base = hc4 - > base ;
2014-10-20 00:08:21 +00:00
const U32 dictLimit = hc4 - > dictLimit ;
2015-03-30 14:57:26 +00:00
const BYTE * const lowPrefixPtr = base + dictLimit ;
2017-10-09 06:55:42 +00:00
const U32 lowLimit = ( hc4 - > lowLimit + 64 KB > ( U32 ) ( ip - base ) ) ? hc4 - > lowLimit : ( U32 ) ( ip - base ) - MAX_DISTANCE ;
2014-10-20 00:08:21 +00:00
const BYTE * const dictBase = hc4 - > dictBase ;
2017-10-09 08:44:05 +00:00
int const delta = ( int ) ( ip - iLowLimit ) ;
2014-02-04 14:11:10 +00:00
int nbAttempts = maxNbAttempts ;
2017-10-21 00:04:29 +00:00
U32 const pattern = LZ4_read32 ( ip ) ;
2017-06-14 00:25:29 +00:00
U32 matchIndex ;
2018-03-21 20:54:36 +00:00
U32 dictMatchIndex ;
2017-10-09 06:55:42 +00:00
repeat_state_e repeat = rep_untested ;
size_t srcPatternLength = 0 ;
2013-12-30 17:16:52 +00:00
2017-11-02 21:53:06 +00:00
DEBUGLOG ( 7 , " LZ4HC_InsertAndGetWiderMatch " ) ;
2014-10-26 10:22:15 +00:00
/* First Match */
2013-12-30 17:16:52 +00:00
LZ4HC_Insert ( hc4 , ip ) ;
2014-10-20 00:08:21 +00:00
matchIndex = HashTable [ LZ4HC_hashPtr ( ip ) ] ;
2017-11-02 21:53:06 +00:00
DEBUGLOG ( 7 , " First match at index %u / %u (lowLimit) " ,
matchIndex , lowLimit ) ;
2013-12-30 17:16:52 +00:00
2018-04-05 21:41:15 +00:00
while ( ( matchIndex > = lowLimit ) & & ( nbAttempts ) ) {
2017-11-02 21:53:06 +00:00
DEBUGLOG ( 7 , " remaining attempts : %i " , nbAttempts ) ;
2013-12-30 17:16:52 +00:00
nbAttempts - - ;
2016-09-03 02:06:01 +00:00
if ( matchIndex > = dictLimit ) {
2017-06-14 00:25:29 +00:00
const BYTE * const matchPtr = base + matchIndex ;
2018-02-20 23:25:45 +00:00
assert ( longest > = 1 ) ;
2018-02-11 10:45:36 +00:00
if ( LZ4_read16 ( iLowLimit + longest - 1 ) = = LZ4_read16 ( matchPtr - delta + longest - 1 ) ) {
2017-10-21 00:04:29 +00:00
if ( LZ4_read32 ( matchPtr ) = = pattern ) {
2017-10-09 08:44:05 +00:00
int mlt = MINMATCH + LZ4_count ( ip + MINMATCH , matchPtr + MINMATCH , iHighLimit ) ;
2018-02-12 05:03:39 +00:00
int const back = delta ? LZ4HC_countBack ( ip , matchPtr , iLowLimit , lowPrefixPtr ) : 0 ;
2017-10-09 08:44:05 +00:00
mlt - = back ;
if ( mlt > longest ) {
longest = mlt ;
* matchpos = matchPtr + back ;
* startpos = ip + back ;
} }
}
2017-10-09 06:55:42 +00:00
} else { /* matchIndex < dictLimit */
2016-11-04 00:14:25 +00:00
const BYTE * const matchPtr = dictBase + matchIndex ;
2017-10-21 00:04:29 +00:00
if ( LZ4_read32 ( matchPtr ) = = pattern ) {
2017-05-02 19:01:13 +00:00
int mlt ;
2017-10-21 00:04:29 +00:00
int back = 0 ;
2014-10-20 00:08:21 +00:00
const BYTE * vLimit = ip + ( dictLimit - matchIndex ) ;
if ( vLimit > iHighLimit ) vLimit = iHighLimit ;
2015-03-30 14:57:26 +00:00
mlt = LZ4_count ( ip + MINMATCH , matchPtr + MINMATCH , vLimit ) + MINMATCH ;
2014-10-20 00:08:21 +00:00
if ( ( ip + mlt = = vLimit ) & & ( vLimit < iHighLimit ) )
2014-11-29 19:19:39 +00:00
mlt + = LZ4_count ( ip + mlt , base + dictLimit , iHighLimit ) ;
2018-02-12 05:03:39 +00:00
back = delta ? LZ4HC_countBack ( ip , matchPtr , iLowLimit , dictBase + lowLimit ) : 0 ;
2014-10-26 10:22:15 +00:00
mlt - = back ;
2017-10-09 06:55:42 +00:00
if ( mlt > longest ) {
longest = mlt ;
* matchpos = base + matchIndex + back ;
* startpos = ip + back ;
} } }
{ U32 const nextOffset = DELTANEXTU16 ( chainTable , matchIndex ) ;
matchIndex - = nextOffset ;
2017-12-22 07:07:25 +00:00
if ( patternAnalysis & & nextOffset = = 1 ) {
2017-10-09 06:55:42 +00:00
/* may be a repeated pattern */
if ( repeat = = rep_untested ) {
2017-11-07 19:29:28 +00:00
if ( ( ( pattern & 0xFFFF ) = = ( pattern > > 16 ) )
& ( ( pattern & 0xFF ) = = ( pattern > > 24 ) ) ) {
2017-10-09 06:55:42 +00:00
repeat = rep_confirmed ;
2017-10-21 00:04:29 +00:00
srcPatternLength = LZ4HC_countPattern ( ip + 4 , iHighLimit , pattern ) + 4 ;
2017-10-09 06:55:42 +00:00
} else {
repeat = rep_not ;
} }
2017-11-07 19:33:40 +00:00
if ( ( repeat = = rep_confirmed )
2017-10-09 06:55:42 +00:00
& & ( matchIndex > = dictLimit ) ) { /* same segment only */
const BYTE * const matchPtr = base + matchIndex ;
2017-10-21 00:04:29 +00:00
if ( LZ4_read32 ( matchPtr ) = = pattern ) { /* good candidate */
2017-10-09 06:55:42 +00:00
size_t const forwardPatternLength = LZ4HC_countPattern ( matchPtr + sizeof ( pattern ) , iHighLimit , pattern ) + sizeof ( pattern ) ;
const BYTE * const maxLowPtr = ( lowPrefixPtr + MAX_DISTANCE > = ip ) ? lowPrefixPtr : ip - MAX_DISTANCE ;
2017-10-21 00:04:29 +00:00
size_t const backLength = LZ4HC_reverseCountPattern ( matchPtr , maxLowPtr , pattern ) ;
2017-10-09 06:55:42 +00:00
size_t const currentSegmentLength = backLength + forwardPatternLength ;
if ( ( currentSegmentLength > = srcPatternLength ) /* current pattern segment large enough to contain full srcPatternLength */
& & ( forwardPatternLength < = srcPatternLength ) ) { /* haven't reached this position yet */
matchIndex + = ( U32 ) forwardPatternLength - ( U32 ) srcPatternLength ; /* best position, full pattern, might be followed by more match */
} else {
matchIndex - = ( U32 ) backLength ; /* let's go to farthest segment position, will find a match of length currentSegmentLength + maybe some back */
}
} } } }
2018-04-05 21:41:15 +00:00
} /* while ((matchIndex>=lowLimit) && (nbAttempts)) */
2013-12-30 17:16:52 +00:00
2018-04-18 17:55:58 +00:00
if ( dict = = usingDictCtx & & nbAttempts & & ip - base - lowLimit < MAX_DISTANCE ) {
2018-03-21 20:54:36 +00:00
ptrdiff_t dictIndexDelta = dictCtx - > base - dictCtx - > end + lowLimit ;
dictMatchIndex = dictCtx - > hashTable [ LZ4HC_hashPtr ( ip ) ] ;
2018-04-06 23:24:22 +00:00
matchIndex = dictMatchIndex + ( int ) dictIndexDelta ;
2018-03-28 16:26:54 +00:00
while ( dictMatchIndex + MAX_DISTANCE > ip - base - dictIndexDelta & & nbAttempts - - ) {
2018-03-21 20:54:36 +00:00
const BYTE * const matchPtr = dictCtx - > base + dictMatchIndex ;
if ( LZ4_read32 ( matchPtr ) = = pattern ) {
int mlt ;
int back = 0 ;
const BYTE * vLimit = ip + ( dictCtx - > end - matchPtr ) ;
if ( vLimit > iHighLimit ) vLimit = iHighLimit ;
mlt = LZ4_count ( ip + MINMATCH , matchPtr + MINMATCH , vLimit ) + MINMATCH ;
2018-04-06 23:24:22 +00:00
/*
if ( ( ip + mlt = = vLimit ) & & ( vLimit < iHighLimit ) ) {
mlt + = LZ4_count ( ip + mlt , base + lowLimit , iHighLimit ) ;
}
*/
2018-03-21 20:54:36 +00:00
back = delta ? LZ4HC_countBack ( ip , matchPtr , iLowLimit , dictCtx - > base + dictCtx - > dictLimit ) : 0 ;
mlt - = back ;
if ( mlt > longest ) {
longest = mlt ;
* matchpos = base + matchIndex + back ;
* startpos = ip + back ;
}
}
{
U32 const nextOffset = DELTANEXTU16 ( dictCtx - > chainTable , dictMatchIndex ) ;
dictMatchIndex - = nextOffset ;
matchIndex - = nextOffset ;
}
}
}
2013-12-30 17:16:52 +00:00
return longest ;
}
2017-10-09 06:40:21 +00:00
/* LZ4HC_InsertAndFindBestMatch() :
 * Find the best match starting exactly at ip (no backward extension).
 * @return : match length found, or a value < MINMATCH when no match. */
LZ4_FORCE_INLINE
int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4,   /* Index table will be updated */
                                 const BYTE* const ip, const BYTE* const iLimit,
                                 const BYTE** matchpos,
                                 const int maxNbAttempts,
                                 const int patternAnalysis,
                                 const dictCtx_directive dict)
{
    const BYTE* uselessPtr = ip;
    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position
     * of a match (*startpos), but this won't be the case here, as we define
     * iLowLimit==ip, so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
    return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1,
                                        matchpos, &uselessPtr,
                                        maxNbAttempts, patternAnalysis, dict);
}
2017-10-09 07:31:12 +00:00
2013-12-30 17:16:52 +00:00
2017-03-08 08:11:15 +00:00
/* limitedOutput_directive :
 * noLimit         : no output-bound checks are performed (see LZ4HC_encodeSequence)
 * limitedOutput   : encoding fails (returns an error signal) when output would exceed oend
 * limitedDestSize : presumably fills a fixed-size destination with as much input as fits
 *                   (oend is adjusted by LASTLITERALS elsewhere) — NOTE(review): confirm
 *                   against the callers outside this view */
typedef enum {
noLimit = 0 ,
limitedOutput = 1 ,
limitedDestSize = 2 ,
} limitedOutput_directive ;
2013-12-30 17:16:52 +00:00
2017-03-20 09:57:41 +00:00
/* LZ4HC_encodeSequence() :
* @ return : 0 if ok ,
* 1 if buffer issue detected */
2017-08-24 14:14:20 +00:00
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
2014-10-26 10:22:15 +00:00
const BYTE * * ip ,
BYTE * * op ,
const BYTE * * anchor ,
int matchLength ,
const BYTE * const match ,
2017-03-07 21:30:54 +00:00
limitedOutput_directive limit ,
2014-10-26 10:22:15 +00:00
BYTE * oend )
2013-12-30 17:16:52 +00:00
{
2017-03-07 21:30:54 +00:00
size_t length ;
2017-05-02 19:01:13 +00:00
BYTE * const token = ( * op ) + + ;
2013-12-30 17:16:52 +00:00
2018-02-12 05:03:39 +00:00
# if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
2017-11-02 20:44:57 +00:00
static const BYTE * start = NULL ;
static U32 totalCost = 0 ;
2017-11-02 22:37:18 +00:00
U32 const pos = ( start = = NULL ) ? 0 : ( U32 ) ( * anchor - start ) ;
2017-11-02 20:44:57 +00:00
U32 const ll = ( U32 ) ( * ip - * anchor ) ;
U32 const llAdd = ( ll > = 15 ) ? ( ( ll - 15 ) / 255 ) + 1 : 0 ;
U32 const mlAdd = ( matchLength > = 19 ) ? ( ( matchLength - 19 ) / 255 ) + 1 : 0 ;
U32 const cost = 1 + llAdd + ll + 2 + mlAdd ;
if ( start = = NULL ) start = * anchor ; /* only works for single segment */
2018-04-12 14:25:40 +00:00
/* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
2018-02-12 05:03:39 +00:00
DEBUGLOG ( 6 , " pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u " ,
2017-11-02 21:53:06 +00:00
pos ,
2017-11-02 20:44:57 +00:00
( U32 ) ( * ip - * anchor ) , matchLength , ( U32 ) ( * ip - match ) ,
cost , totalCost ) ;
2017-11-02 21:53:06 +00:00
totalCost + = cost ;
2014-12-16 21:03:16 +00:00
# endif
2014-10-19 16:41:42 +00:00
2014-02-04 14:11:10 +00:00
/* Encode Literal length */
2017-03-07 21:30:54 +00:00
length = ( size_t ) ( * ip - * anchor ) ;
if ( ( limit ) & & ( ( * op + ( length > > 8 ) + length + ( 2 + 1 + LASTLITERALS ) ) > oend ) ) return 1 ; /* Check output limit */
2017-03-16 09:16:24 +00:00
if ( length > = RUN_MASK ) {
size_t len = length - RUN_MASK ;
2017-03-07 21:30:54 +00:00
* token = ( RUN_MASK < < ML_BITS ) ;
for ( ; len > = 255 ; len - = 255 ) * ( * op ) + + = 255 ;
* ( * op ) + + = ( BYTE ) len ;
} else {
* token = ( BYTE ) ( length < < ML_BITS ) ;
}
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Copy Literals */
2014-11-29 19:19:39 +00:00
LZ4_wildCopy ( * op , * anchor , ( * op ) + length ) ;
* op + = length ;
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Encode Offset */
2014-11-29 19:19:39 +00:00
LZ4_writeLE16 ( * op , ( U16 ) ( * ip - match ) ) ; * op + = 2 ;
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Encode MatchLength */
2017-11-02 20:44:57 +00:00
assert ( matchLength > = MINMATCH ) ;
2017-03-07 21:30:54 +00:00
length = ( size_t ) ( matchLength - MINMATCH ) ;
if ( ( limit ) & & ( * op + ( length > > 8 ) + ( 1 + LASTLITERALS ) > oend ) ) return 1 ; /* Check output limit */
if ( length > = ML_MASK ) {
2016-09-03 02:32:06 +00:00
* token + = ML_MASK ;
length - = ML_MASK ;
2017-03-07 21:30:54 +00:00
for ( ; length > = 510 ; length - = 510 ) { * ( * op ) + + = 255 ; * ( * op ) + + = 255 ; }
if ( length > = 255 ) { length - = 255 ; * ( * op ) + + = 255 ; }
2016-09-03 02:32:06 +00:00
* ( * op ) + + = ( BYTE ) length ;
} else {
* token + = ( BYTE ) ( length ) ;
}
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Prepare next loop */
2013-12-30 17:16:52 +00:00
* ip + = matchLength ;
* anchor = * ip ;
return 0 ;
}
2016-12-06 14:21:28 +00:00
static int LZ4HC_compress_hashChain (
2016-11-12 15:29:54 +00:00
LZ4HC_CCtx_internal * const ctx ,
2016-11-08 03:32:24 +00:00
const char * const source ,
char * const dest ,
2017-03-08 08:11:15 +00:00
int * srcSizePtr ,
2016-11-08 03:32:24 +00:00
int const maxOutputSize ,
2016-12-07 11:16:33 +00:00
unsigned maxNbAttempts ,
2018-04-18 17:55:58 +00:00
const limitedOutput_directive limit ,
const dictCtx_directive dict
2014-10-26 10:22:15 +00:00
)
2013-12-30 17:16:52 +00:00
{
2017-03-08 08:11:15 +00:00
const int inputSize = * srcSizePtr ;
2017-12-22 07:07:25 +00:00
const int patternAnalysis = ( maxNbAttempts > 64 ) ; /* levels 8+ */
2017-03-08 08:11:15 +00:00
2013-12-30 17:16:52 +00:00
const BYTE * ip = ( const BYTE * ) source ;
const BYTE * anchor = ip ;
const BYTE * const iend = ip + inputSize ;
const BYTE * const mflimit = iend - MFLIMIT ;
const BYTE * const matchlimit = ( iend - LASTLITERALS ) ;
2017-03-08 08:11:15 +00:00
BYTE * optr = ( BYTE * ) dest ;
2013-12-30 17:16:52 +00:00
BYTE * op = ( BYTE * ) dest ;
2017-03-08 15:49:55 +00:00
BYTE * oend = op + maxOutputSize ;
2013-12-30 17:16:52 +00:00
int ml , ml2 , ml3 , ml0 ;
2016-09-03 02:06:01 +00:00
const BYTE * ref = NULL ;
const BYTE * start2 = NULL ;
const BYTE * ref2 = NULL ;
const BYTE * start3 = NULL ;
const BYTE * ref3 = NULL ;
2013-12-30 17:16:52 +00:00
const BYTE * start0 ;
const BYTE * ref0 ;
2014-10-18 10:18:14 +00:00
/* init */
2017-03-08 08:11:15 +00:00
* srcSizePtr = 0 ;
2017-12-22 11:47:59 +00:00
if ( limit = = limitedDestSize ) oend - = LASTLITERALS ; /* Hack for support LZ4 format restriction */
2017-03-08 08:11:15 +00:00
if ( inputSize < LZ4_minLength ) goto _last_literals ; /* Input too small, no compression (all literals) */
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Main Loop */
edge case : compress up to end-mflimit (12 bytes)
The LZ4 block format specification
states that the last match must start
at a minimum distance of 12 bytes from the end of the block.
However, out of an abundance of caution,
the reference implementation would actually stop searching matches
at 13 bytes from the end of the block.
This patch fixes this small detail.
The new version is now able to properly compress a limit case
such as `aaaaaaaabaaa\n`
as reported by Gao Xiang (@hsiangkao).
Obviously, it doesn't change a lot of things.
This is just one additional match candidate per block, with a maximum match length of 7 (since last 5 bytes must remain literals).
With default policy, blocks are 4 MB long, so it doesn't happen too often
Compressing silesia.tar at default level 1 saves 5 bytes (100930101 -> 100930096).
At max level 12, it saves a grand 16 bytes (77389871 -> 77389855).
The impact is a bit more visible when blocks are smaller, hence more numerous.
For example, compressing silesia with blocks of 64 KB (using -12 -B4D) saves 543 bytes (77304583 -> 77304040).
So the smaller the packet size, the more visible the impact.
And it happens we have a ton of scenarios with little blocks using LZ4 compression ...
And a useless "hooray" sidenote :
the patch improves the LZ4 compression record of silesia (using -12 -B7D --no-frame-crc) by 16 bytes (77270672 -> 77270656)
and the record on enwik9 by 44 bytes (371680396 -> 371680352) (previously claimed by [smallz4](http://create.stephan-brumme.com/smallz4/) ).
2018-02-24 19:47:53 +00:00
while ( ip < = mflimit ) {
2018-04-18 17:55:58 +00:00
ml = LZ4HC_InsertAndFindBestMatch ( ctx , ip , matchlimit , & ref , maxNbAttempts , patternAnalysis , dict ) ;
2017-10-09 06:40:21 +00:00
if ( ml < MINMATCH ) { ip + + ; continue ; }
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* saved, in case we would skip too much */
2013-12-30 17:16:52 +00:00
start0 = ip ;
ref0 = ref ;
ml0 = ml ;
_Search2 :
edge case : compress up to end-mflimit (12 bytes)
The LZ4 block format specification
states that the last match must start
at a minimum distance of 12 bytes from the end of the block.
However, out of an abundance of caution,
the reference implementation would actually stop searching matches
at 13 bytes from the end of the block.
This patch fixes this small detail.
The new version is now able to properly compress a limit case
such as `aaaaaaaabaaa\n`
as reported by Gao Xiang (@hsiangkao).
Obviously, it doesn't change a lot of things.
This is just one additional match candidate per block, with a maximum match length of 7 (since last 5 bytes must remain literals).
With default policy, blocks are 4 MB long, so it doesn't happen too often
Compressing silesia.tar at default level 1 saves 5 bytes (100930101 -> 100930096).
At max level 12, it saves a grand 16 bytes (77389871 -> 77389855).
The impact is a bit more visible when blocks are smaller, hence more numerous.
For example, compressing silesia with blocks of 64 KB (using -12 -B4D) saves 543 bytes (77304583 -> 77304040).
So the smaller the packet size, the more visible the impact.
And it happens we have a ton of scenarios with little blocks using LZ4 compression ...
And a useless "hooray" sidenote :
the patch improves the LZ4 compression record of silesia (using -12 -B7D --no-frame-crc) by 16 bytes (77270672 -> 77270656)
and the record on enwik9 by 44 bytes (371680396 -> 371680352) (previously claimed by [smallz4](http://create.stephan-brumme.com/smallz4/) ).
2018-02-24 19:47:53 +00:00
if ( ip + ml < = mflimit )
2017-12-22 07:07:25 +00:00
ml2 = LZ4HC_InsertAndGetWiderMatch ( ctx ,
ip + ml - 2 , ip + 0 , matchlimit , ml , & ref2 , & start2 ,
2018-04-18 17:55:58 +00:00
maxNbAttempts , patternAnalysis , dict ) ;
2017-03-08 15:49:55 +00:00
else
ml2 = ml ;
2013-12-30 17:16:52 +00:00
2016-09-03 02:06:01 +00:00
if ( ml2 = = ml ) { /* No better match */
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
continue ;
}
2016-09-03 02:06:01 +00:00
if ( start0 < ip ) {
if ( start2 < ip + ml0 ) { /* empirical */
2013-12-30 17:16:52 +00:00
ip = start0 ;
ref = ref0 ;
ml = ml0 ;
}
}
2014-02-04 14:11:10 +00:00
/* Here, start0==ip */
2016-09-03 02:06:01 +00:00
if ( ( start2 - ip ) < 3 ) { /* First Match too small : removed */
2013-12-30 17:16:52 +00:00
ml = ml2 ;
ip = start2 ;
ref = ref2 ;
goto _Search2 ;
}
_Search3 :
2017-03-16 09:16:24 +00:00
/* At this stage, we have :
* ml2 > ml1 , and
* ip1 + 3 < = ip2 ( usually < ip1 + ml1 ) */
2016-09-03 02:06:01 +00:00
if ( ( start2 - ip ) < OPTIMAL_ML ) {
2013-12-30 17:16:52 +00:00
int correction ;
int new_ml = ml ;
if ( new_ml > OPTIMAL_ML ) new_ml = OPTIMAL_ML ;
if ( ip + new_ml > start2 + ml2 - MINMATCH ) new_ml = ( int ) ( start2 - ip ) + ml2 - MINMATCH ;
correction = new_ml - ( int ) ( start2 - ip ) ;
2016-09-03 02:06:01 +00:00
if ( correction > 0 ) {
2013-12-30 17:16:52 +00:00
start2 + = correction ;
ref2 + = correction ;
ml2 - = correction ;
}
}
2014-02-04 14:11:10 +00:00
/* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
2013-12-30 17:16:52 +00:00
edge case : compress up to end-mflimit (12 bytes)
The LZ4 block format specification
states that the last match must start
at a minimum distance of 12 bytes from the end of the block.
However, out of an abundance of caution,
the reference implementation would actually stop searching matches
at 13 bytes from the end of the block.
This patch fixes this small detail.
The new version is now able to properly compress a limit case
such as `aaaaaaaabaaa\n`
as reported by Gao Xiang (@hsiangkao).
Obviously, it doesn't change a lot of things.
This is just one additional match candidate per block, with a maximum match length of 7 (since last 5 bytes must remain literals).
With default policy, blocks are 4 MB long, so it doesn't happen too often
Compressing silesia.tar at default level 1 saves 5 bytes (100930101 -> 100930096).
At max level 12, it saves a grand 16 bytes (77389871 -> 77389855).
The impact is a bit more visible when blocks are smaller, hence more numerous.
For example, compressing silesia with blocks of 64 KB (using -12 -B4D) saves 543 bytes (77304583 -> 77304040).
So the smaller the packet size, the more visible the impact.
And it happens we have a ton of scenarios with little blocks using LZ4 compression ...
And a useless "hooray" sidenote :
the patch improves the LZ4 compression record of silesia (using -12 -B7D --no-frame-crc) by 16 bytes (77270672 -> 77270656)
and the record on enwik9 by 44 bytes (371680396 -> 371680352) (previously claimed by [smallz4](http://create.stephan-brumme.com/smallz4/) ).
2018-02-24 19:47:53 +00:00
if ( start2 + ml2 < = mflimit )
2017-12-22 07:07:25 +00:00
ml3 = LZ4HC_InsertAndGetWiderMatch ( ctx ,
start2 + ml2 - 3 , start2 , matchlimit , ml2 , & ref3 , & start3 ,
2018-04-18 17:55:58 +00:00
maxNbAttempts , patternAnalysis , dict ) ;
2017-03-07 14:11:48 +00:00
else
ml3 = ml2 ;
2013-12-30 17:16:52 +00:00
2016-09-03 02:06:01 +00:00
if ( ml3 = = ml2 ) { /* No better match : 2 sequences to encode */
2014-02-04 14:11:10 +00:00
/* ip & ref are known; Now for ml */
2013-12-30 17:16:52 +00:00
if ( start2 < ip + ml ) ml = ( int ) ( start2 - ip ) ;
2014-02-04 14:11:10 +00:00
/* Now, encode 2 sequences */
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
ip = start2 ;
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml2 , ref2 , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
continue ;
}
2016-09-03 02:06:01 +00:00
if ( start3 < ip + ml + 3 ) { /* Not enough space for match 2 : remove it */
if ( start3 > = ( ip + ml ) ) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
if ( start2 < ip + ml ) {
2013-12-30 17:16:52 +00:00
int correction = ( int ) ( ip + ml - start2 ) ;
start2 + = correction ;
ref2 + = correction ;
ml2 - = correction ;
2016-09-03 02:06:01 +00:00
if ( ml2 < MINMATCH ) {
2013-12-30 17:16:52 +00:00
start2 = start3 ;
ref2 = ref3 ;
ml2 = ml3 ;
}
}
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
ip = start3 ;
ref = ref3 ;
ml = ml3 ;
start0 = start2 ;
ref0 = ref2 ;
ml0 = ml2 ;
goto _Search2 ;
}
start2 = start3 ;
ref2 = ref3 ;
ml2 = ml3 ;
goto _Search3 ;
}
2014-02-04 14:11:10 +00:00
/*
2014-10-26 10:22:15 +00:00
* OK , now we have 3 ascending matches ; let ' s write at least the first one
* ip & ref are known ; Now for ml
*/
2016-09-03 02:06:01 +00:00
if ( start2 < ip + ml ) {
if ( ( start2 - ip ) < ( int ) ML_MASK ) {
2013-12-30 17:16:52 +00:00
int correction ;
if ( ml > OPTIMAL_ML ) ml = OPTIMAL_ML ;
if ( ip + ml > start2 + ml2 - MINMATCH ) ml = ( int ) ( start2 - ip ) + ml2 - MINMATCH ;
correction = ml - ( int ) ( start2 - ip ) ;
2016-09-03 02:06:01 +00:00
if ( correction > 0 ) {
2013-12-30 17:16:52 +00:00
start2 + = correction ;
ref2 + = correction ;
ml2 - = correction ;
}
2016-09-03 02:06:01 +00:00
} else {
2013-12-30 17:16:52 +00:00
ml = ( int ) ( start2 - ip ) ;
}
}
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
ip = start2 ;
ref = ref2 ;
ml = ml2 ;
start2 = start3 ;
ref2 = ref3 ;
ml2 = ml3 ;
goto _Search3 ;
}
2017-03-08 08:11:15 +00:00
_last_literals :
2014-02-04 14:11:10 +00:00
/* Encode Last Literals */
2017-03-16 09:16:24 +00:00
{ size_t lastRunSize = ( size_t ) ( iend - anchor ) ; /* literals */
size_t litLength = ( lastRunSize + 255 - RUN_MASK ) / 255 ;
size_t const totalSize = 1 + litLength + lastRunSize ;
2017-03-08 15:49:55 +00:00
if ( limit = = limitedDestSize ) oend + = LASTLITERALS ; /* restore correct value */
2017-03-08 08:11:15 +00:00
if ( limit & & ( op + totalSize > oend ) ) {
if ( limit = = limitedOutput ) return 0 ; /* Check output limit */
2017-03-09 09:19:24 +00:00
/* adapt lastRunSize to fill 'dest' */
2017-03-08 08:11:15 +00:00
lastRunSize = ( size_t ) ( oend - op ) - 1 ;
litLength = ( lastRunSize + 255 - RUN_MASK ) / 255 ;
lastRunSize - = litLength ;
}
ip = anchor + lastRunSize ;
2017-03-07 14:11:48 +00:00
if ( lastRunSize > = RUN_MASK ) {
size_t accumulator = lastRunSize - RUN_MASK ;
* op + + = ( RUN_MASK < < ML_BITS ) ;
for ( ; accumulator > = 255 ; accumulator - = 255 ) * op + + = 255 ;
* op + + = ( BYTE ) accumulator ;
} else {
* op + + = ( BYTE ) ( lastRunSize < < ML_BITS ) ;
}
memcpy ( op , anchor , lastRunSize ) ;
op + = lastRunSize ;
2013-12-30 17:16:52 +00:00
}
2014-02-04 14:11:10 +00:00
/* End */
2017-03-08 08:11:15 +00:00
* srcSizePtr = ( int ) ( ( ( const char * ) ip ) - source ) ;
2013-12-30 17:16:52 +00:00
return ( int ) ( ( ( char * ) op ) - dest ) ;
2017-03-08 08:11:15 +00:00
_dest_overflow :
if ( limit = = limitedDestSize ) {
op = optr ; /* restore correct out pointer */
goto _last_literals ;
}
return 0 ;
2013-12-30 17:16:52 +00:00
}
2018-02-25 08:32:09 +00:00
static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal * ctx ,
const char * const source , char * dst ,
int * srcSizePtr , int dstCapacity ,
int const nbSearches , size_t sufficient_len ,
2018-04-18 17:55:58 +00:00
const limitedOutput_directive limit , int const fullUpdate ,
const dictCtx_directive dict ) ;
2018-02-25 08:32:09 +00:00
2013-12-30 17:16:52 +00:00
2018-04-18 17:55:58 +00:00
/* LZ4HC_compress_generic_internal() :
 * dispatches compression to the parser selected by compression level :
 * - levels <= 9  : hash-chain parser (LZ4HC_compress_hashChain)
 * - levels 10-12 : optimal parser (LZ4HC_compress_optimal)
 * @srcSizePtr : on input, nb of bytes available at src ; updated by the selected parser
 * @return : nb of bytes written into dst, or 0 if it failed */
LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
    LZ4HC_CCtx_internal* const ctx,
    const char* const src,
    char* const dst,
    int* const srcSizePtr,
    int const dstCapacity,
    int cLevel,
    const limitedOutput_directive limit,
    const dictCtx_directive dict
    )
{
    typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
    typedef struct {
        lz4hc_strat_e strat;
        U32 nbSearches;
        U32 targetLength;
    } cParams_t;
    /* per-level tuning table : strategy, nb of searches, target length (used by optimal parser only) */
    static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
        { lz4hc,     2, 16 },  /* 0, unused */
        { lz4hc,     2, 16 },  /* 1, unused */
        { lz4hc,     2, 16 },  /* 2, unused */
        { lz4hc,     4, 16 },  /* 3 */
        { lz4hc,     8, 16 },  /* 4 */
        { lz4hc,    16, 16 },  /* 5 */
        { lz4hc,    32, 16 },  /* 6 */
        { lz4hc,    64, 16 },  /* 7 */
        { lz4hc,   128, 16 },  /* 8 */
        { lz4hc,   256, 16 },  /* 9 */
        { lz4opt,   96, 64 },  /*10==LZ4HC_CLEVEL_OPT_MIN*/
        { lz4opt,  512,128 },  /*11 */
        { lz4opt, 8192, LZ4_OPT_NUM },  /* 12==LZ4HC_CLEVEL_MAX */
    };

    DEBUGLOG(4, "LZ4HC_compress_generic(%p, %p, %d)", ctx, src, *srcSizePtr);

    if (limit == limitedDestSize && dstCapacity < 1) return 0;   /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;    /* Unsupported input size (too large or negative) */

    ctx->end += *srcSizePtr;   /* advance history end ; parsers rely on it */
    if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT;   /* note : convention is different from lz4frame, maybe something to review */
    cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
    assert(cLevel >= 0);
    assert(cLevel <= LZ4HC_CLEVEL_MAX);
    {   cParams_t const cParam = clTable[cLevel];
        if (cParam.strat == lz4hc)
            return LZ4HC_compress_hashChain(ctx,
                                src, dst, srcSizePtr, dstCapacity,
                                cParam.nbSearches, limit, dict);
        assert(cParam.strat == lz4opt);
        return LZ4HC_compress_optimal(ctx,
                            src, dst, srcSizePtr, dstCapacity,
                            cParam.nbSearches, cParam.targetLength, limit,
                            cLevel == LZ4HC_CLEVEL_MAX,   /* ultra mode */
                            dict);
    }
}
/* LZ4HC_compress_generic_noDictCtx() :
 * variant for streams which do not reference an external dictionary context */
static int LZ4HC_compress_generic_noDictCtx (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    assert(ctx->dictCtx == NULL);   /* caller guarantees no attached dictCtx */
    return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity,
                                           cLevel, limit, noDictCtx);
}
/* LZ4HC_compress_generic_dictCtx() :
 * variant for streams which reference an external dictionary context */
static int LZ4HC_compress_generic_dictCtx (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    assert(ctx->dictCtx != NULL);   /* caller guarantees a dictCtx is attached */
    return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity,
                                           cLevel, limit, usingDictCtx);
}
/* LZ4HC_compress_generic() :
 * selects the dictCtx / no-dictCtx specialization depending on stream state */
static int LZ4HC_compress_generic (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    return (ctx->dictCtx == NULL)
         ? LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit)
         : LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
}
2016-11-12 23:50:29 +00:00
int LZ4_sizeofStateHC ( void ) { return sizeof ( LZ4_streamHC_t ) ; }
2013-12-30 17:16:52 +00:00
2018-04-16 19:09:59 +00:00
int LZ4_compress_HC_extStateHC_fastReset ( void * state , const char * src , char * dst , int srcSize , int dstCapacity , int compressionLevel )
2013-12-30 17:16:52 +00:00
{
2017-03-16 09:16:24 +00:00
LZ4HC_CCtx_internal * const ctx = & ( ( LZ4_streamHC_t * ) state ) - > internal_donotuse ;
2014-02-04 14:11:10 +00:00
if ( ( ( size_t ) ( state ) & ( sizeof ( void * ) - 1 ) ) ! = 0 ) return 0 ; /* Error : state is not aligned for pointers (32 or 64 bits) */
2018-04-16 19:09:59 +00:00
LZ4_resetStreamHC_fast ( ( LZ4_streamHC_t * ) state , compressionLevel ) ;
2016-11-11 21:00:02 +00:00
LZ4HC_init ( ctx , ( const BYTE * ) src ) ;
2017-03-16 22:10:38 +00:00
if ( dstCapacity < LZ4_compressBound ( srcSize ) )
return LZ4HC_compress_generic ( ctx , src , dst , & srcSize , dstCapacity , compressionLevel , limitedOutput ) ;
2015-04-11 11:28:09 +00:00
else
2017-03-16 22:10:38 +00:00
return LZ4HC_compress_generic ( ctx , src , dst , & srcSize , dstCapacity , compressionLevel , noLimit ) ;
2013-12-30 17:16:52 +00:00
}
2018-04-16 19:09:59 +00:00
int LZ4_compress_HC_extStateHC ( void * state , const char * src , char * dst , int srcSize , int dstCapacity , int compressionLevel )
{
if ( ( ( size_t ) ( state ) & ( sizeof ( void * ) - 1 ) ) ! = 0 ) return 0 ; /* Error : state is not aligned for pointers (32 or 64 bits) */
LZ4_resetStreamHC ( ( LZ4_streamHC_t * ) state , compressionLevel ) ;
return LZ4_compress_HC_extStateHC_fastReset ( state , src , dst , srcSize , dstCapacity , compressionLevel ) ;
}
2017-03-16 22:10:38 +00:00
int LZ4_compress_HC ( const char * src , char * dst , int srcSize , int dstCapacity , int compressionLevel )
2013-12-30 17:16:52 +00:00
{
2016-11-12 23:50:29 +00:00
# if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
2016-11-13 01:14:57 +00:00
LZ4_streamHC_t * const statePtr = ( LZ4_streamHC_t * ) malloc ( sizeof ( LZ4_streamHC_t ) ) ;
2015-10-21 14:00:48 +00:00
# else
2016-11-11 21:00:02 +00:00
LZ4_streamHC_t state ;
LZ4_streamHC_t * const statePtr = & state ;
2015-10-21 14:00:48 +00:00
# endif
2018-04-05 21:41:15 +00:00
int cSize = LZ4_compress_HC_extStateHC ( statePtr , src , dst , srcSize , dstCapacity , compressionLevel ) ;
2016-11-12 23:50:29 +00:00
# if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
2015-10-21 14:00:48 +00:00
free ( statePtr ) ;
# endif
return cSize ;
2013-12-30 17:16:52 +00:00
}
2017-03-16 22:10:38 +00:00
/* LZ4_compress_HC_destSize() :
2017-12-20 13:14:01 +00:00
* only compatible with regular HC parser */
2017-03-16 22:10:38 +00:00
int LZ4_compress_HC_destSize ( void * LZ4HC_Data , const char * source , char * dest , int * sourceSizePtr , int targetDestSize , int cLevel )
{
LZ4HC_CCtx_internal * const ctx = & ( ( LZ4_streamHC_t * ) LZ4HC_Data ) - > internal_donotuse ;
2018-04-05 21:41:15 +00:00
LZ4_resetStreamHC ( ( LZ4_streamHC_t * ) LZ4HC_Data , cLevel ) ;
2017-03-16 22:10:38 +00:00
LZ4HC_init ( ctx , ( const BYTE * ) source ) ;
return LZ4HC_compress_generic ( ctx , source , dest , sourceSizePtr , targetDestSize , cLevel , limitedDestSize ) ;
}
2013-12-30 17:16:52 +00:00
2014-12-03 18:17:10 +00:00
2014-10-18 10:18:14 +00:00
/**************************************
2015-05-02 14:44:43 +00:00
* Streaming Functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2014-10-18 10:18:14 +00:00
/* allocation */
2018-03-19 21:47:52 +00:00
LZ4_streamHC_t * LZ4_createStreamHC ( void ) {
LZ4_streamHC_t * LZ4_streamHCPtr = ( LZ4_streamHC_t * ) malloc ( sizeof ( LZ4_streamHC_t ) ) ;
DEBUGLOG ( 4 , " LZ4_createStreamHC() -> %p " , LZ4_streamHCPtr ) ;
2018-04-06 23:24:22 +00:00
LZ4_streamHCPtr - > internal_donotuse . end = ( const BYTE * ) - 1 ;
2018-04-04 19:20:47 +00:00
LZ4_streamHCPtr - > internal_donotuse . base = NULL ;
LZ4_streamHCPtr - > internal_donotuse . dictCtx = NULL ;
2018-03-19 21:47:52 +00:00
return LZ4_streamHCPtr ;
}
/* LZ4_freeStreamHC() : releases a state created by LZ4_createStreamHC() ; NULL is tolerated */
int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) {
    DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
    if (LZ4_streamHCPtr != NULL) free(LZ4_streamHCPtr);
    return 0;
}
2013-12-30 17:16:52 +00:00
2014-10-18 10:18:14 +00:00
/* initialization */
/* LZ4_resetStreamHC() :
 * full reset : drops history and dictionary references, then records compression level */
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    LZ4HC_CCtx_internal* const s = &LZ4_streamHCPtr->internal_donotuse;
    LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET);   /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
    DEBUGLOG(4, "LZ4_resetStreamHC(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    s->end = (const BYTE*)-1;   /* sentinel : not attached to any input yet */
    s->base = NULL;
    s->dictCtx = NULL;
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
2018-04-16 19:09:59 +00:00
/* LZ4_resetStreamHC_fast() :
 * lighter reset than LZ4_resetStreamHC() : keeps the hash tables' contents,
 * converting `end` into a base-relative offset so indexes stay coherent once
 * the stream is re-attached to a new input. */
void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET);   /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
    DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    /* NOTE(review): assumes `end` and `base` are consistent (state previously initialized) ;
     * if the state holds the (const BYTE*)-1 "fresh" sentinel with base==NULL, this
     * subtraction leaves the sentinel untouched — confirm against LZ4HC_init() behavior. */
    LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base;
    LZ4_streamHCPtr->internal_donotuse.base = NULL;
    LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
2017-08-10 07:48:19 +00:00
/* LZ4_setCompressionLevel() :
 * clamps `compressionLevel` into [LZ4HC_CLEVEL_DEFAULT-on-underflow, LZ4HC_CLEVEL_MAX] and stores it */
void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    int level = compressionLevel;
    if (level < 1) level = LZ4HC_CLEVEL_DEFAULT;         /* 0 or negative selects the default level */
    if (level > LZ4HC_CLEVEL_MAX) level = LZ4HC_CLEVEL_MAX;
    LZ4_streamHCPtr->internal_donotuse.compressionLevel = level;
}
2014-10-18 10:18:14 +00:00
/* LZ4_loadDictHC() :
 * loads `dictionary` as history for subsequent streaming compression.
 * Only the last 64 KB are kept (maximum useful match distance).
 * @return : nb of dictionary bytes effectively loaded */
int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize);
    if (dictSize > 64 KB) {
        dictionary += dictSize - 64 KB;   /* keep only the tail */
        dictSize = 64 KB;
    }
    LZ4HC_init(ctxPtr, (const BYTE*)dictionary);
    LZ4HC_clearTables(ctxPtr);
    ctxPtr->end = (const BYTE*)dictionary + dictSize;
    /* index dictionary content ; needs at least 4 bytes to form a hash */
    if (dictSize >= 4) LZ4HC_Insert(ctxPtr, ctxPtr->end-3);
    return dictSize;
}
/* compression */
2016-11-12 15:29:54 +00:00
/* LZ4HC_setExternalDict() :
 * switches the stream to a new, non-contiguous input block `newBlock`.
 * The previous prefix becomes the (single) external dictionary segment.
 * Statement order below matters : limits are derived from the old base/end
 * before they are overwritten. */
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
    DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
    if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert(ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */

    /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
    ctxPtr->lowLimit  = ctxPtr->dictLimit;
    ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
    ctxPtr->dictBase  = ctxPtr->base;
    ctxPtr->base = newBlock - ctxPtr->dictLimit;   /* keep indexes continuous across segments */
    ctxPtr->end  = newBlock;
    ctxPtr->nextToUpdate = ctxPtr->dictLimit;   /* match referencing will resume from there */
}
2016-11-11 21:00:02 +00:00
/* LZ4_compressHC_continue_generic() :
 * streaming compression entry point : maintains history, handles index overflow,
 * non-contiguous blocks, and input overlapping the current dictionary. */
static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                            const char* src, char* dst,
                                            int* srcSizePtr, int dstCapacity,
                                            limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(4, "LZ4_compressHC_continue_generic(%p, %p, %d)", LZ4_streamHCPtr, src, *srcSizePtr);
    /* auto-init if forgotten */
    if (ctxPtr->base == NULL) LZ4HC_init(ctxPtr, (const BYTE*) src);

    /* Check overflow */
    if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
        size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
        if (dictSize > 64 KB) dictSize = 64 KB;
        /* re-anchor indexes by reloading the last <=64 KB as a fresh dictionary */
        LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
    }

    /* Check if blocks follow each other */
    if ((const BYTE*)src != ctxPtr->end) LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);

    /* Check overlapping input/dictionary space */
    {   const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
        const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
        const BYTE* const dictEnd   = ctxPtr->dictBase + ctxPtr->dictLimit;
        if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
            /* shrink dictionary to its non-overlapped part ; drop it entirely if < 4 bytes remain */
            ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
            if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
        }
    }

    return LZ4HC_compress_generic(ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}
2017-03-16 22:10:38 +00:00
/* LZ4_compress_HC_continue() :
 * compresses `src` as the next block of an ongoing stream */
int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
{
    /* skip per-write bound checks when dst is guaranteed large enough */
    limitedOutput_directive const mode =
        (dstCapacity < LZ4_compressBound(srcSize)) ? limitedOutput : noLimit;
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, mode);
}
2017-03-16 22:41:30 +00:00
/* LZ4_compress_HC_continue_destSize() :
 * streaming variant targeting an exact output budget ;
 * *srcSizePtr is updated to the nb of input bytes actually consumed */
int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
{
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst,
                                           srcSizePtr, targetDestSize, limitedDestSize);
}
2017-03-16 09:16:24 +00:00
2014-10-18 10:18:14 +00:00
/* dictionary saving */
/* LZ4_saveDictHC() :
 * copies the last `dictSize` bytes of history into `safeBuffer`,
 * then re-bases the stream so future blocks reference `safeBuffer` as dictionary.
 * @return : effective nb of bytes saved */
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
    DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    if (dictSize > 64 KB) dictSize = 64 KB;     /* max useful history */
    if (dictSize < 4) dictSize = 0;             /* too small to be referenced */
    if (dictSize > prefixSize) dictSize = prefixSize;
    /* memmove : safeBuffer may overlap the current prefix */
    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
    {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
        streamPtr->end = (const BYTE*)safeBuffer + dictSize;
        streamPtr->base = streamPtr->end - endIndex;   /* keep existing indexes valid after the re-base */
        streamPtr->dictLimit = endIndex - dictSize;
        streamPtr->lowLimit = endIndex - dictSize;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
2014-10-18 10:18:14 +00:00
/***********************************
2015-05-02 14:44:43 +00:00
* Deprecated Functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2016-11-12 23:50:29 +00:00
/* These functions currently generate deprecation warnings */
2015-04-11 11:28:09 +00:00
/* Deprecated compression functions */
2015-05-03 19:57:21 +00:00
/* thin forwarding wrappers : each maps a legacy entry point onto the modern
 * LZ4_compress_HC* API (level 0 selects the default level ; missing capacity
 * is replaced by LZ4_compressBound(srcSize)) */
int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
2015-04-11 11:28:09 +00:00
/* Deprecated streaming functions */
2014-10-18 10:18:14 +00:00
int LZ4_sizeofStreamStateHC ( void ) { return LZ4_STREAMHCSIZE ; }
2015-05-06 00:58:24 +00:00
int LZ4_resetStreamStateHC ( void * state , char * inputBuffer )
2014-10-18 10:18:14 +00:00
{
2016-11-12 15:29:54 +00:00
LZ4HC_CCtx_internal * ctx = & ( ( LZ4_streamHC_t * ) state ) - > internal_donotuse ;
2014-10-18 10:18:14 +00:00
if ( ( ( ( size_t ) state ) & ( sizeof ( void * ) - 1 ) ) ! = 0 ) return 1 ; /* Error : pointer is not aligned for pointer (32 or 64 bits) */
2016-11-11 21:00:02 +00:00
LZ4HC_init ( ctx , ( const BYTE * ) inputBuffer ) ;
2018-02-05 23:18:00 +00:00
ctx - > inputBuffer = inputBuffer ;
2014-10-18 10:18:14 +00:00
return 0 ;
}
2018-02-05 23:18:00 +00:00
/* LZ4_createHC() :
 * deprecated : allocates a streaming state attached to `inputBuffer`.
 * @return : opaque state pointer, or NULL on allocation failure */
void* LZ4_createHC (const char* inputBuffer)
{
    LZ4_streamHC_t* hc4 = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
    if (hc4 == NULL) return NULL;   /* not enough memory */
    LZ4HC_init(&hc4->internal_donotuse, (const BYTE*)inputBuffer);
    assert(sizeof(size_t) == sizeof(void*));
    hc4->internal_donotuse.inputBuffer = (void*)(size_t)inputBuffer;   /* ugly hack, circumvent -Wcast-qual */
    return hc4;
}
2017-08-09 23:51:19 +00:00
int LZ4_freeHC ( void * LZ4HC_Data ) {
if ( ! LZ4HC_Data ) return 0 ; /* support free on NULL */
FREEMEM ( LZ4HC_Data ) ;
return 0 ;
}
2014-10-18 10:18:14 +00:00
2017-03-16 22:10:38 +00:00
int LZ4_compressHC2_continue ( void * LZ4HC_Data , const char * src , char * dst , int srcSize , int cLevel )
2014-10-18 10:18:14 +00:00
{
2017-03-16 22:10:38 +00:00
return LZ4HC_compress_generic ( & ( ( LZ4_streamHC_t * ) LZ4HC_Data ) - > internal_donotuse , src , dst , & srcSize , 0 , cLevel , noLimit ) ;
2014-10-18 10:18:14 +00:00
}
2013-12-30 17:16:52 +00:00
2017-03-16 22:10:38 +00:00
int LZ4_compressHC2_limitedOutput_continue ( void * LZ4HC_Data , const char * src , char * dst , int srcSize , int dstCapacity , int cLevel )
2014-02-04 14:11:10 +00:00
{
2017-03-16 22:10:38 +00:00
return LZ4HC_compress_generic ( & ( ( LZ4_streamHC_t * ) LZ4HC_Data ) - > internal_donotuse , src , dst , & srcSize , dstCapacity , cLevel , limitedOutput ) ;
2014-02-04 14:11:10 +00:00
}
2014-10-18 10:18:14 +00:00
/* LZ4_slideInputBufferHC() :
 * deprecated : saves the last 64 KB of history at the start of the attached
 * input buffer, and returns the position right after it, where the next
 * input block should be written */
char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
{
    LZ4HC_CCtx_internal* const hc4 = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
    int const dictSize = LZ4_saveDictHC((LZ4_streamHC_t*)LZ4HC_Data, (char*)(hc4->inputBuffer), 64 KB);
    return (char*)(hc4->inputBuffer) + dictSize;
}
2018-02-25 08:32:09 +00:00
/* ================================================
* LZ4 Optimal parser ( levels 10 - 12 )
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
typedef struct {
int price ;
int off ;
int mlen ;
int litlen ;
} LZ4HC_optimal_t ;
/* price in bytes of encoding `litlen` literals (token share + extra length bytes) */
LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
{
    if (litlen < (int)RUN_MASK)
        return litlen;   /* fits in the token : literals only */
    return litlen + 1 + (litlen - RUN_MASK) / 255;   /* + extra length byte(s) */
}
/* requires mlen >= MINMATCH */
LZ4_FORCE_INLINE int LZ4HC_sequencePrice ( int litlen , int mlen )
{
int price = 1 + 2 ; /* token + 16-bit offset */
price + = LZ4HC_literalsPrice ( litlen ) ;
if ( mlen > = ( int ) ( ML_MASK + MINMATCH ) )
price + = 1 + ( mlen - ( ML_MASK + MINMATCH ) ) / 255 ;
return price ;
}
typedef struct {
int off ;
int len ;
} LZ4HC_match_t ;
/* LZ4HC_FindLongerMatch() :
 * searches for a match at `ip` strictly longer than `minLen`.
 * @return : { off, len } of the match found, or { 0, 0 } if none improves on minLen */
LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
                      const BYTE* ip, const BYTE* const iHighLimit,
                      int minLen, int nbSearches,
                      const dictCtx_directive dict)
{
    LZ4HC_match_t match = { 0 , 0 };
    const BYTE* matchPtr = NULL;
    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
     * but this won't be the case here, as we define iLowLimit==ip,
     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
    int const matchLength = LZ4HC_InsertAndGetWiderMatch(ctx,
                                ip, ip, iHighLimit, minLen, &matchPtr, &ip,
                                nbSearches, 1 /* patternAnalysis */, dict);
    if (matchLength <= minLen) return match;   /* nothing better than minLen : report "no match" */
    match.len = matchLength;
    match.off = (int)(ip - matchPtr);
    return match;
}
static int LZ4HC_compress_optimal (
LZ4HC_CCtx_internal * ctx ,
const char * const source ,
char * dst ,
int * srcSizePtr ,
int dstCapacity ,
int const nbSearches ,
size_t sufficient_len ,
2018-04-18 17:55:58 +00:00
const limitedOutput_directive limit ,
int const fullUpdate ,
const dictCtx_directive dict
2018-02-25 08:32:09 +00:00
)
{
# define TRAILING_LITERALS 3
LZ4HC_optimal_t opt [ LZ4_OPT_NUM + TRAILING_LITERALS ] ; /* ~64 KB, which is a bit large for stack... */
const BYTE * ip = ( const BYTE * ) source ;
const BYTE * anchor = ip ;
const BYTE * const iend = ip + * srcSizePtr ;
const BYTE * const mflimit = iend - MFLIMIT ;
const BYTE * const matchlimit = iend - LASTLITERALS ;
BYTE * op = ( BYTE * ) dst ;
BYTE * opSaved = ( BYTE * ) dst ;
BYTE * oend = op + dstCapacity ;
/* init */
DEBUGLOG ( 5 , " LZ4HC_compress_optimal " ) ;
* srcSizePtr = 0 ;
if ( limit = = limitedDestSize ) oend - = LASTLITERALS ; /* Hack for support LZ4 format restriction */
if ( sufficient_len > = LZ4_OPT_NUM ) sufficient_len = LZ4_OPT_NUM - 1 ;
/* Main Loop */
assert ( ip - anchor < LZ4_MAX_INPUT_SIZE ) ;
while ( ip < = mflimit ) {
int const llen = ( int ) ( ip - anchor ) ;
int best_mlen , best_off ;
int cur , last_match_pos = 0 ;
2018-04-18 17:55:58 +00:00
LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch ( ctx , ip , matchlimit , MINMATCH - 1 , nbSearches , dict ) ;
2018-02-25 08:32:09 +00:00
if ( firstMatch . len = = 0 ) { ip + + ; continue ; }
if ( ( size_t ) firstMatch . len > sufficient_len ) {
/* good enough solution : immediate encoding */
int const firstML = firstMatch . len ;
const BYTE * const matchPos = ip - firstMatch . off ;
opSaved = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , firstML , matchPos , limit , oend ) ) /* updates ip, op and anchor */
goto _dest_overflow ;
continue ;
}
/* set prices for first positions (literals) */
{ int rPos ;
for ( rPos = 0 ; rPos < MINMATCH ; rPos + + ) {
int const cost = LZ4HC_literalsPrice ( llen + rPos ) ;
opt [ rPos ] . mlen = 1 ;
opt [ rPos ] . off = 0 ;
opt [ rPos ] . litlen = llen + rPos ;
opt [ rPos ] . price = cost ;
DEBUGLOG ( 7 , " rPos:%3i => price:%3i (litlen=%i) -- initial setup " ,
rPos , cost , opt [ rPos ] . litlen ) ;
} }
/* set prices using initial match */
{ int mlen = MINMATCH ;
int const matchML = firstMatch . len ; /* necessarily < sufficient_len < LZ4_OPT_NUM */
int const offset = firstMatch . off ;
assert ( matchML < LZ4_OPT_NUM ) ;
for ( ; mlen < = matchML ; mlen + + ) {
int const cost = LZ4HC_sequencePrice ( llen , mlen ) ;
opt [ mlen ] . mlen = mlen ;
opt [ mlen ] . off = offset ;
opt [ mlen ] . litlen = llen ;
opt [ mlen ] . price = cost ;
DEBUGLOG ( 7 , " rPos:%3i => price:%3i (matchlen=%i) -- initial setup " ,
mlen , cost , mlen ) ;
} }
last_match_pos = firstMatch . len ;
{ int addLit ;
for ( addLit = 1 ; addLit < = TRAILING_LITERALS ; addLit + + ) {
opt [ last_match_pos + addLit ] . mlen = 1 ; /* literal */
opt [ last_match_pos + addLit ] . off = 0 ;
opt [ last_match_pos + addLit ] . litlen = addLit ;
opt [ last_match_pos + addLit ] . price = opt [ last_match_pos ] . price + LZ4HC_literalsPrice ( addLit ) ;
DEBUGLOG ( 7 , " rPos:%3i => price:%3i (litlen=%i) -- initial setup " ,
last_match_pos + addLit , opt [ last_match_pos + addLit ] . price , addLit ) ;
} }
/* check further positions */
for ( cur = 1 ; cur < last_match_pos ; cur + + ) {
const BYTE * const curPtr = ip + cur ;
LZ4HC_match_t newMatch ;
if ( curPtr > mflimit ) break ;
DEBUGLOG ( 7 , " rPos:%u[%u] vs [%u]%u " ,
cur , opt [ cur ] . price , opt [ cur + 1 ] . price , cur + 1 ) ;
if ( fullUpdate ) {
/* not useful to search here if next position has same (or lower) cost */
if ( ( opt [ cur + 1 ] . price < = opt [ cur ] . price )
/* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
& & ( opt [ cur + MINMATCH ] . price < opt [ cur ] . price + 3 /*min seq price*/ ) )
continue ;
} else {
/* not useful to search here if next position has same (or lower) cost */
if ( opt [ cur + 1 ] . price < = opt [ cur ] . price ) continue ;
}
DEBUGLOG ( 7 , " search at rPos:%u " , cur ) ;
if ( fullUpdate )
2018-04-18 17:55:58 +00:00
newMatch = LZ4HC_FindLongerMatch ( ctx , curPtr , matchlimit , MINMATCH - 1 , nbSearches , dict ) ;
2018-02-25 08:32:09 +00:00
else
/* only test matches of minimum length; slightly faster, but misses a few bytes */
2018-04-18 17:55:58 +00:00
newMatch = LZ4HC_FindLongerMatch ( ctx , curPtr , matchlimit , last_match_pos - cur , nbSearches , dict ) ;
2018-02-25 08:32:09 +00:00
if ( ! newMatch . len ) continue ;
if ( ( ( size_t ) newMatch . len > sufficient_len )
| | ( newMatch . len + cur > = LZ4_OPT_NUM ) ) {
/* immediate encoding */
best_mlen = newMatch . len ;
best_off = newMatch . off ;
last_match_pos = cur + 1 ;
goto encode ;
}
/* before match : set price with literals at beginning */
{ int const baseLitlen = opt [ cur ] . litlen ;
int litlen ;
for ( litlen = 1 ; litlen < MINMATCH ; litlen + + ) {
int const price = opt [ cur ] . price - LZ4HC_literalsPrice ( baseLitlen ) + LZ4HC_literalsPrice ( baseLitlen + litlen ) ;
int const pos = cur + litlen ;
if ( price < opt [ pos ] . price ) {
opt [ pos ] . mlen = 1 ; /* literal */
opt [ pos ] . off = 0 ;
opt [ pos ] . litlen = baseLitlen + litlen ;
opt [ pos ] . price = price ;
DEBUGLOG ( 7 , " rPos:%3i => price:%3i (litlen=%i) " ,
pos , price , opt [ pos ] . litlen ) ;
} } }
/* set prices using match at position = cur */
{ int const matchML = newMatch . len ;
int ml = MINMATCH ;
assert ( cur + newMatch . len < LZ4_OPT_NUM ) ;
for ( ; ml < = matchML ; ml + + ) {
int const pos = cur + ml ;
int const offset = newMatch . off ;
int price ;
int ll ;
DEBUGLOG ( 7 , " testing price rPos %i (last_match_pos=%i) " ,
pos , last_match_pos ) ;
if ( opt [ cur ] . mlen = = 1 ) {
ll = opt [ cur ] . litlen ;
price = ( ( cur > ll ) ? opt [ cur - ll ] . price : 0 )
+ LZ4HC_sequencePrice ( ll , ml ) ;
} else {
ll = 0 ;
price = opt [ cur ] . price + LZ4HC_sequencePrice ( 0 , ml ) ;
}
if ( pos > last_match_pos + TRAILING_LITERALS | | price < = opt [ pos ] . price ) {
DEBUGLOG ( 7 , " rPos:%3i => price:%3i (matchlen=%i) " ,
pos , price , ml ) ;
assert ( pos < LZ4_OPT_NUM ) ;
if ( ( ml = = matchML ) /* last pos of last match */
& & ( last_match_pos < pos ) )
last_match_pos = pos ;
opt [ pos ] . mlen = ml ;
opt [ pos ] . off = offset ;
opt [ pos ] . litlen = ll ;
opt [ pos ] . price = price ;
} } }
/* complete following positions with literals */
{ int addLit ;
for ( addLit = 1 ; addLit < = TRAILING_LITERALS ; addLit + + ) {
opt [ last_match_pos + addLit ] . mlen = 1 ; /* literal */
opt [ last_match_pos + addLit ] . off = 0 ;
opt [ last_match_pos + addLit ] . litlen = addLit ;
opt [ last_match_pos + addLit ] . price = opt [ last_match_pos ] . price + LZ4HC_literalsPrice ( addLit ) ;
DEBUGLOG ( 7 , " rPos:%3i => price:%3i (litlen=%i) " , last_match_pos + addLit , opt [ last_match_pos + addLit ] . price , addLit ) ;
} }
} /* for (cur = 1; cur <= last_match_pos; cur++) */
best_mlen = opt [ last_match_pos ] . mlen ;
best_off = opt [ last_match_pos ] . off ;
cur = last_match_pos - best_mlen ;
encode : /* cur, last_match_pos, best_mlen, best_off must be set */
assert ( cur < LZ4_OPT_NUM ) ;
assert ( last_match_pos > = 1 ) ; /* == 1 when only one candidate */
DEBUGLOG ( 6 , " reverse traversal, looking for shortest path (last_match_pos=%i) " , last_match_pos ) ;
{ int candidate_pos = cur ;
int selected_matchLength = best_mlen ;
int selected_offset = best_off ;
while ( 1 ) { /* from end to beginning */
int const next_matchLength = opt [ candidate_pos ] . mlen ; /* can be 1, means literal */
int const next_offset = opt [ candidate_pos ] . off ;
DEBUGLOG ( 7 , " pos %i: sequence length %i " , candidate_pos , selected_matchLength ) ;
opt [ candidate_pos ] . mlen = selected_matchLength ;
opt [ candidate_pos ] . off = selected_offset ;
selected_matchLength = next_matchLength ;
selected_offset = next_offset ;
if ( next_matchLength > candidate_pos ) break ; /* last match elected, first match to encode */
assert ( next_matchLength > 0 ) ; /* can be 1, means literal */
candidate_pos - = next_matchLength ;
} }
/* encode all recorded sequences in order */
{ int rPos = 0 ; /* relative position (to ip) */
while ( rPos < last_match_pos ) {
int const ml = opt [ rPos ] . mlen ;
int const offset = opt [ rPos ] . off ;
if ( ml = = 1 ) { ip + + ; rPos + + ; continue ; } /* literal; note: can end up with several literals, in which case, skip them */
rPos + = ml ;
assert ( ml > = MINMATCH ) ;
assert ( ( offset > = 1 ) & & ( offset < = MAX_DISTANCE ) ) ;
opSaved = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ip - offset , limit , oend ) ) /* updates ip, op and anchor */
goto _dest_overflow ;
} }
} /* while (ip <= mflimit) */
_last_literals :
/* Encode Last Literals */
{ size_t lastRunSize = ( size_t ) ( iend - anchor ) ; /* literals */
size_t litLength = ( lastRunSize + 255 - RUN_MASK ) / 255 ;
size_t const totalSize = 1 + litLength + lastRunSize ;
if ( limit = = limitedDestSize ) oend + = LASTLITERALS ; /* restore correct value */
if ( limit & & ( op + totalSize > oend ) ) {
if ( limit = = limitedOutput ) return 0 ; /* Check output limit */
/* adapt lastRunSize to fill 'dst' */
lastRunSize = ( size_t ) ( oend - op ) - 1 ;
litLength = ( lastRunSize + 255 - RUN_MASK ) / 255 ;
lastRunSize - = litLength ;
}
ip = anchor + lastRunSize ;
if ( lastRunSize > = RUN_MASK ) {
size_t accumulator = lastRunSize - RUN_MASK ;
* op + + = ( RUN_MASK < < ML_BITS ) ;
for ( ; accumulator > = 255 ; accumulator - = 255 ) * op + + = 255 ;
* op + + = ( BYTE ) accumulator ;
} else {
* op + + = ( BYTE ) ( lastRunSize < < ML_BITS ) ;
}
memcpy ( op , anchor , lastRunSize ) ;
op + = lastRunSize ;
}
/* End */
* srcSizePtr = ( int ) ( ( ( const char * ) ip ) - source ) ;
return ( int ) ( ( char * ) op - dst ) ;
_dest_overflow :
if ( limit = = limitedDestSize ) {
op = opSaved ; /* restore correct out pointer */
goto _last_literals ;
}
return 0 ;
}