/*
    LZ4 HC - High Compression Mode of LZ4
    Copyright (C) 2011-2017, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
       - LZ4 source repository : https://github.com/lz4/lz4
       - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
2016-11-12 23:50:29 +00:00
/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
2014-02-04 14:11:10 +00:00
2015-10-21 14:00:48 +00:00
/* *************************************
2015-05-02 14:44:43 +00:00
* Tuning Parameter
2015-10-21 14:00:48 +00:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2014-02-04 14:11:10 +00:00
2017-03-16 09:16:24 +00:00
/*! HEAPMODE :
* Select how default compression function will allocate workplace memory ,
* in stack ( 0 : fastest ) , or in heap ( 1 : requires malloc ( ) ) .
* Since workplace is rather large , heap mode is recommended .
2015-10-21 14:00:48 +00:00
*/
2016-11-12 23:50:29 +00:00
# ifndef LZ4HC_HEAPMODE
# define LZ4HC_HEAPMODE 1
# endif
2014-02-04 14:11:10 +00:00
2015-10-21 14:00:48 +00:00
2017-03-16 09:16:24 +00:00
/*=== Dependency ===*/
2017-11-03 18:28:28 +00:00
# define LZ4_HC_STATIC_LINKING_ONLY
2014-11-29 16:12:26 +00:00
# include "lz4hc.h"
2013-12-30 17:16:52 +00:00
2017-03-16 09:16:24 +00:00
/*=== Common LZ4 definitions ===*/
2014-11-29 19:19:39 +00:00
# if defined(__GNUC__)
# pragma GCC diagnostic ignored "-Wunused-function"
2013-12-30 17:16:52 +00:00
# endif
2014-11-29 19:19:39 +00:00
# if defined (__clang__)
# pragma clang diagnostic ignored "-Wunused-function"
2013-12-30 17:16:52 +00:00
# endif
2014-11-29 19:19:39 +00:00
# define LZ4_COMMONDEFS_ONLY
2017-03-16 22:10:38 +00:00
# include "lz4.c" /* LZ4_count, constants, mem */
2013-12-30 17:16:52 +00:00
2017-03-16 09:16:24 +00:00
/*=== Constants ===*/
2013-12-30 17:16:52 +00:00
# define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
2017-03-16 09:16:24 +00:00
/*=== Macros ===*/
2017-08-10 07:48:19 +00:00
# define MIN(a,b) ( (a) < (b) ? (a) : (b) )
# define MAX(a,b) ( (a) > (b) ? (a) : (b) )
2017-05-02 19:01:13 +00:00
# define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
# define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */
# define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
2013-12-30 17:16:52 +00:00
2014-11-29 19:19:39 +00:00
/* Hash the 4 bytes at `ptr` into a hash-table bucket index. */
static U32 LZ4HC_hashPtr(const void* ptr)
{
    return HASH_FUNCTION(LZ4_read32(ptr));
}
2013-12-30 17:16:52 +00:00
2014-11-29 19:19:39 +00:00
/**************************************
2015-05-02 14:44:43 +00:00
* HC Compression
2014-11-29 19:19:39 +00:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2016-11-12 15:29:54 +00:00
/* Reset the HC compression context so that `start` becomes the beginning
 * of the new input segment. Index fields are offset by 64 KB so that
 * position 0 never collides with the hash-table's "empty" value. */
static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start)
{
    /* clear match finder tables */
    MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
    MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));

    /* virtual positions : base is placed 64 KB before `start` */
    hc4->base         = start - 64 KB;
    hc4->dictBase     = start - 64 KB;
    hc4->end          = start;
    hc4->nextToUpdate = 64 KB;
    hc4->dictLimit    = 64 KB;
    hc4->lowLimit     = 64 KB;
}
2014-02-04 14:11:10 +00:00
/* Update chains up to ip (excluded) */
2017-08-24 14:14:20 +00:00
LZ4_FORCE_INLINE void LZ4HC_Insert ( LZ4HC_CCtx_internal * hc4 , const BYTE * ip )
2013-12-30 17:16:52 +00:00
{
2016-09-03 02:06:01 +00:00
U16 * const chainTable = hc4 - > chainTable ;
U32 * const hashTable = hc4 - > hashTable ;
2014-10-18 10:18:14 +00:00
const BYTE * const base = hc4 - > base ;
2016-09-03 02:06:01 +00:00
U32 const target = ( U32 ) ( ip - base ) ;
2014-10-18 10:18:14 +00:00
U32 idx = hc4 - > nextToUpdate ;
2013-12-30 17:16:52 +00:00
2016-09-03 02:06:01 +00:00
while ( idx < target ) {
U32 const h = LZ4HC_hashPtr ( base + idx ) ;
size_t delta = idx - hashTable [ h ] ;
2013-12-30 17:16:52 +00:00
if ( delta > MAX_DISTANCE ) delta = MAX_DISTANCE ;
2017-05-02 19:01:13 +00:00
DELTANEXTU16 ( chainTable , idx ) = ( U16 ) delta ;
2016-09-03 02:06:01 +00:00
hashTable [ h ] = idx ;
2014-10-18 10:18:14 +00:00
idx + + ;
2013-12-30 17:16:52 +00:00
}
2014-08-01 18:10:21 +00:00
2014-10-18 10:18:14 +00:00
hc4 - > nextToUpdate = target ;
2013-12-30 17:16:52 +00:00
}
2017-10-09 06:55:42 +00:00
/** LZ4HC_countBack() :
* @ return : negative value , nb of common bytes before ip / match */
2017-11-07 19:22:57 +00:00
LZ4_FORCE_INLINE
int LZ4HC_countBack ( const BYTE * const ip , const BYTE * const match ,
const BYTE * const iMin , const BYTE * const mMin )
2017-10-09 06:55:42 +00:00
{
int back = 0 ;
while ( ( ip + back > iMin )
& & ( match + back > mMin )
& & ( ip [ back - 1 ] = = match [ back - 1 ] ) )
back - - ;
return back ;
}
2017-11-07 18:53:29 +00:00
/* LZ4HC_countPattern() :
 * Count how many bytes, starting at ip, repeat the given pattern.
 * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3 !)
 * note : the 64-bit widening uses two 16-bit shifts instead of `<< 32`,
 *        so the expression is well-formed even when reg_t is 32-bit
 *        (a single shift by 32 would exceed the type width there). */
static unsigned LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
{
    const BYTE* const iStart = ip;
    reg_t const pattern = (sizeof(pattern)==8) ?
        (reg_t)pattern32 + (((reg_t)pattern32 << 16) << 16) : pattern32;

    /* word-at-a-time comparison while a full word fits before iEnd */
    while (likely(ip < iEnd - (sizeof(pattern)-1))) {
        reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
        if (!diff) { ip += sizeof(pattern); continue; }
        ip += LZ4_NbCommonBytes(diff);
        return (unsigned)(ip - iStart);
    }

    /* byte-wise tail, honoring platform endianess */
    if (LZ4_isLittleEndian()) {
        reg_t patternByte = pattern;
        while ((ip < iEnd) && (*ip == (BYTE)patternByte)) {
            ip++; patternByte >>= 8;
        }
    } else {  /* big endian */
        U32 bitOffset = (sizeof(pattern)*8) - 8;
        while (ip < iEnd) {
            BYTE const byte = (BYTE)(pattern >> bitOffset);
            if (*ip != byte) break;
            ip++; bitOffset -= 8;
        }
    }

    return (unsigned)(ip - iStart);
}
2017-11-07 19:05:48 +00:00
/* LZ4HC_reverseCountPattern() :
* pattern must be a sample of repetitive pattern of length 1 , 2 or 4 ( but not 3 ! )
* read using natural platform endianess */
2017-10-09 06:55:42 +00:00
static unsigned LZ4HC_reverseCountPattern ( const BYTE * ip , const BYTE * const iLow , U32 pattern )
{
const BYTE * const iStart = ip ;
2017-11-07 19:05:48 +00:00
while ( likely ( ip > = iLow + 4 ) ) {
2017-10-09 06:55:42 +00:00
if ( LZ4_read32 ( ip - 4 ) ! = pattern ) break ;
ip - = 4 ;
}
while ( likely ( ip > iLow ) ) {
2017-11-07 19:05:48 +00:00
const BYTE * bytePtr = ( const BYTE * ) ( & pattern ) + 3 ; /* works for any endianess */
if ( ip [ - 1 ] ! = * bytePtr ) break ;
ip - - ; bytePtr - - ;
2017-10-09 06:55:42 +00:00
}
return ( unsigned ) ( iStart - ip ) ;
}
typedef enum { rep_untested , rep_not , rep_confirmed } repeat_state_e ;
2017-08-24 14:14:20 +00:00
LZ4_FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (
2016-11-12 15:29:54 +00:00
LZ4HC_CCtx_internal * hc4 ,
2015-03-30 14:57:26 +00:00
const BYTE * const ip ,
const BYTE * const iLowLimit ,
const BYTE * const iHighLimit ,
2014-10-26 10:22:15 +00:00
int longest ,
const BYTE * * matchpos ,
const BYTE * * startpos ,
const int maxNbAttempts )
2013-12-30 17:16:52 +00:00
{
2014-10-18 10:18:14 +00:00
U16 * const chainTable = hc4 - > chainTable ;
U32 * const HashTable = hc4 - > hashTable ;
const BYTE * const base = hc4 - > base ;
2014-10-20 00:08:21 +00:00
const U32 dictLimit = hc4 - > dictLimit ;
2015-03-30 14:57:26 +00:00
const BYTE * const lowPrefixPtr = base + dictLimit ;
2017-10-09 06:55:42 +00:00
const U32 lowLimit = ( hc4 - > lowLimit + 64 KB > ( U32 ) ( ip - base ) ) ? hc4 - > lowLimit : ( U32 ) ( ip - base ) - MAX_DISTANCE ;
2014-10-20 00:08:21 +00:00
const BYTE * const dictBase = hc4 - > dictBase ;
2017-10-09 08:44:05 +00:00
int const delta = ( int ) ( ip - iLowLimit ) ;
2014-02-04 14:11:10 +00:00
int nbAttempts = maxNbAttempts ;
2017-10-21 00:04:29 +00:00
U32 const pattern = LZ4_read32 ( ip ) ;
2017-06-14 00:25:29 +00:00
U32 matchIndex ;
2017-10-09 06:55:42 +00:00
repeat_state_e repeat = rep_untested ;
size_t srcPatternLength = 0 ;
2013-12-30 17:16:52 +00:00
2017-11-02 21:53:06 +00:00
DEBUGLOG ( 7 , " LZ4HC_InsertAndGetWiderMatch " ) ;
2014-10-26 10:22:15 +00:00
/* First Match */
2013-12-30 17:16:52 +00:00
LZ4HC_Insert ( hc4 , ip ) ;
2014-10-20 00:08:21 +00:00
matchIndex = HashTable [ LZ4HC_hashPtr ( ip ) ] ;
2017-11-02 21:53:06 +00:00
DEBUGLOG ( 7 , " First match at index %u / %u (lowLimit) " ,
matchIndex , lowLimit ) ;
2013-12-30 17:16:52 +00:00
2016-09-03 02:06:01 +00:00
while ( ( matchIndex > = lowLimit ) & & ( nbAttempts ) ) {
2017-11-02 21:53:06 +00:00
DEBUGLOG ( 7 , " remaining attempts : %i " , nbAttempts ) ;
2013-12-30 17:16:52 +00:00
nbAttempts - - ;
2016-09-03 02:06:01 +00:00
if ( matchIndex > = dictLimit ) {
2017-06-14 00:25:29 +00:00
const BYTE * const matchPtr = base + matchIndex ;
2017-10-09 08:44:05 +00:00
if ( * ( iLowLimit + longest ) = = * ( matchPtr - delta + longest ) ) {
2017-10-21 00:04:29 +00:00
if ( LZ4_read32 ( matchPtr ) = = pattern ) {
2017-10-09 08:44:05 +00:00
int mlt = MINMATCH + LZ4_count ( ip + MINMATCH , matchPtr + MINMATCH , iHighLimit ) ;
#if 0
/* more generic but unfortunately slower ... */
int const back = LZ4HC_countBack ( ip , matchPtr , iLowLimit , lowPrefixPtr ) ;
# else
int back = 0 ;
while ( ( ip + back > iLowLimit )
& & ( matchPtr + back > lowPrefixPtr )
& & ( ip [ back - 1 ] = = matchPtr [ back - 1 ] ) ) {
back - - ;
}
# endif
mlt - = back ;
if ( mlt > longest ) {
longest = mlt ;
* matchpos = matchPtr + back ;
* startpos = ip + back ;
} }
}
2017-10-09 06:55:42 +00:00
} else { /* matchIndex < dictLimit */
2016-11-04 00:14:25 +00:00
const BYTE * const matchPtr = dictBase + matchIndex ;
2017-10-21 00:04:29 +00:00
if ( LZ4_read32 ( matchPtr ) = = pattern ) {
2017-05-02 19:01:13 +00:00
int mlt ;
2017-10-21 00:04:29 +00:00
int back = 0 ;
2014-10-20 00:08:21 +00:00
const BYTE * vLimit = ip + ( dictLimit - matchIndex ) ;
if ( vLimit > iHighLimit ) vLimit = iHighLimit ;
2015-03-30 14:57:26 +00:00
mlt = LZ4_count ( ip + MINMATCH , matchPtr + MINMATCH , vLimit ) + MINMATCH ;
2014-10-20 00:08:21 +00:00
if ( ( ip + mlt = = vLimit ) & & ( vLimit < iHighLimit ) )
2014-11-29 19:19:39 +00:00
mlt + = LZ4_count ( ip + mlt , base + dictLimit , iHighLimit ) ;
2017-10-09 06:55:42 +00:00
while ( ( ip + back > iLowLimit )
& & ( matchIndex + back > lowLimit )
& & ( ip [ back - 1 ] = = matchPtr [ back - 1 ] ) )
back - - ;
2014-10-26 10:22:15 +00:00
mlt - = back ;
2017-10-09 06:55:42 +00:00
if ( mlt > longest ) {
longest = mlt ;
* matchpos = base + matchIndex + back ;
* startpos = ip + back ;
} } }
{ U32 const nextOffset = DELTANEXTU16 ( chainTable , matchIndex ) ;
matchIndex - = nextOffset ;
2017-11-07 19:22:57 +00:00
if ( nextOffset = = 1 ) {
2017-10-09 06:55:42 +00:00
/* may be a repeated pattern */
if ( repeat = = rep_untested ) {
2017-10-21 00:04:29 +00:00
if ( ( pattern & 0xFFFF ) = = ( pattern > > 16 ) ) { /* is it enough ? */
2017-10-09 06:55:42 +00:00
repeat = rep_confirmed ;
2017-10-21 00:04:29 +00:00
srcPatternLength = LZ4HC_countPattern ( ip + 4 , iHighLimit , pattern ) + 4 ;
2017-10-09 06:55:42 +00:00
} else {
repeat = rep_not ;
} }
if ( ( repeat = = rep_confirmed ) /* proven repeated pattern (1-2-4) */
& & ( matchIndex > = dictLimit ) ) { /* same segment only */
const BYTE * const matchPtr = base + matchIndex ;
2017-10-21 00:04:29 +00:00
if ( LZ4_read32 ( matchPtr ) = = pattern ) { /* good candidate */
2017-10-09 06:55:42 +00:00
size_t const forwardPatternLength = LZ4HC_countPattern ( matchPtr + sizeof ( pattern ) , iHighLimit , pattern ) + sizeof ( pattern ) ;
const BYTE * const maxLowPtr = ( lowPrefixPtr + MAX_DISTANCE > = ip ) ? lowPrefixPtr : ip - MAX_DISTANCE ;
2017-10-21 00:04:29 +00:00
size_t const backLength = LZ4HC_reverseCountPattern ( matchPtr , maxLowPtr , pattern ) ;
2017-10-09 06:55:42 +00:00
size_t const currentSegmentLength = backLength + forwardPatternLength ;
if ( ( currentSegmentLength > = srcPatternLength ) /* current pattern segment large enough to contain full srcPatternLength */
& & ( forwardPatternLength < = srcPatternLength ) ) { /* haven't reached this position yet */
matchIndex + = ( U32 ) forwardPatternLength - ( U32 ) srcPatternLength ; /* best position, full pattern, might be followed by more match */
} else {
matchIndex - = ( U32 ) backLength ; /* let's go to farthest segment position, will find a match of length currentSegmentLength + maybe some back */
}
} } } }
} /* while ((matchIndex>=lowLimit) && (nbAttempts)) */
2013-12-30 17:16:52 +00:00
return longest ;
}
2017-10-09 08:50:28 +00:00
# if 1
2017-10-09 06:40:21 +00:00
LZ4_FORCE_INLINE
int LZ4HC_InsertAndFindBestMatch ( LZ4HC_CCtx_internal * const hc4 , /* Index table will be updated */
const BYTE * const ip , const BYTE * const iLimit ,
const BYTE * * matchpos ,
const int maxNbAttempts )
{
const BYTE * uselessPtr = ip ;
return LZ4HC_InsertAndGetWiderMatch ( hc4 , ip , ip , iLimit , MINMATCH - 1 , matchpos , & uselessPtr , maxNbAttempts ) ;
}
2017-10-09 07:31:12 +00:00
# else
LZ4_FORCE_INLINE
int LZ4HC_InsertAndFindBestMatch ( LZ4HC_CCtx_internal * const hc4 , /* Index table will be updated */
const BYTE * const ip , const BYTE * const iLimit ,
const BYTE * * matchpos ,
const int maxNbAttempts )
{
U16 * const chainTable = hc4 - > chainTable ;
U32 * const HashTable = hc4 - > hashTable ;
const BYTE * const base = hc4 - > base ;
const BYTE * const dictBase = hc4 - > dictBase ;
const U32 dictLimit = hc4 - > dictLimit ;
const U32 lowLimit = ( hc4 - > lowLimit + 64 KB > ( U32 ) ( ip - base ) ) ? hc4 - > lowLimit : ( U32 ) ( ip - base ) - ( 64 KB - 1 ) ;
U32 matchIndex ;
int nbAttempts = maxNbAttempts ;
size_t ml = 0 ;
/* HC4 match finder */
LZ4HC_Insert ( hc4 , ip ) ;
matchIndex = HashTable [ LZ4HC_hashPtr ( ip ) ] ;
while ( ( matchIndex > = lowLimit ) & & ( nbAttempts ) ) {
nbAttempts - - ;
if ( matchIndex > = dictLimit ) {
const BYTE * const match = base + matchIndex ;
if ( ( * ( match + ml ) = = * ( ip + ml ) ) /* can be longer */
& & ( LZ4_read32 ( match ) = = LZ4_read32 ( ip ) ) )
{
size_t const mlt = LZ4_count ( ip + MINMATCH , match + MINMATCH , iLimit ) + MINMATCH ;
if ( mlt > ml ) { ml = mlt ; * matchpos = match ; }
}
} else {
const BYTE * const match = dictBase + matchIndex ;
if ( LZ4_read32 ( match ) = = LZ4_read32 ( ip ) ) {
size_t mlt ;
const BYTE * vLimit = ip + ( dictLimit - matchIndex ) ;
if ( vLimit > iLimit ) vLimit = iLimit ;
mlt = LZ4_count ( ip + MINMATCH , match + MINMATCH , vLimit ) + MINMATCH ;
if ( ( ip + mlt = = vLimit ) & & ( vLimit < iLimit ) )
mlt + = LZ4_count ( ip + mlt , base + dictLimit , iLimit ) ;
if ( mlt > ml ) { ml = mlt ; * matchpos = base + matchIndex ; } /* virtual matchpos */
}
}
matchIndex - = DELTANEXTU16 ( chainTable , matchIndex ) ;
}
return ( int ) ml ;
}
# endif
2013-12-30 17:16:52 +00:00
2017-03-08 08:11:15 +00:00
/* Output-bound handling mode, passed down to the block compressors. */
typedef enum {
noLimit = 0 ,         /* no restriction on output size */
limitedOutput = 1 ,   /* fail (return 0) if dest buffer would overflow */
limitedDestSize = 2 , /* fill dest buffer as much as possible, truncating src if needed */
} limitedOutput_directive ;
2013-12-30 17:16:52 +00:00
2017-03-20 09:57:41 +00:00
/* LZ4HC_encodeSequence() :
* @ return : 0 if ok ,
* 1 if buffer issue detected */
2017-08-24 14:14:20 +00:00
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
2014-10-26 10:22:15 +00:00
const BYTE * * ip ,
BYTE * * op ,
const BYTE * * anchor ,
int matchLength ,
const BYTE * const match ,
2017-03-07 21:30:54 +00:00
limitedOutput_directive limit ,
2014-10-26 10:22:15 +00:00
BYTE * oend )
2013-12-30 17:16:52 +00:00
{
2017-03-07 21:30:54 +00:00
size_t length ;
2017-05-02 19:01:13 +00:00
BYTE * const token = ( * op ) + + ;
2013-12-30 17:16:52 +00:00
2017-11-02 20:44:57 +00:00
# if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
static const BYTE * start = NULL ;
static U32 totalCost = 0 ;
2017-11-02 22:37:18 +00:00
U32 const pos = ( start = = NULL ) ? 0 : ( U32 ) ( * anchor - start ) ;
2017-11-02 20:44:57 +00:00
U32 const ll = ( U32 ) ( * ip - * anchor ) ;
U32 const llAdd = ( ll > = 15 ) ? ( ( ll - 15 ) / 255 ) + 1 : 0 ;
U32 const mlAdd = ( matchLength > = 19 ) ? ( ( matchLength - 19 ) / 255 ) + 1 : 0 ;
U32 const cost = 1 + llAdd + ll + 2 + mlAdd ;
if ( start = = NULL ) start = * anchor ; /* only works for single segment */
2017-11-03 01:54:18 +00:00
//g_debuglog_enable = (pos >= 2228) & (pos <= 2262);
2017-11-02 22:37:18 +00:00
DEBUGLOG ( 2 , " pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u " ,
2017-11-02 21:53:06 +00:00
pos ,
2017-11-02 20:44:57 +00:00
( U32 ) ( * ip - * anchor ) , matchLength , ( U32 ) ( * ip - match ) ,
cost , totalCost ) ;
2017-11-02 21:53:06 +00:00
totalCost + = cost ;
2014-12-16 21:03:16 +00:00
# endif
2014-10-19 16:41:42 +00:00
2014-02-04 14:11:10 +00:00
/* Encode Literal length */
2017-03-07 21:30:54 +00:00
length = ( size_t ) ( * ip - * anchor ) ;
if ( ( limit ) & & ( ( * op + ( length > > 8 ) + length + ( 2 + 1 + LASTLITERALS ) ) > oend ) ) return 1 ; /* Check output limit */
2017-03-16 09:16:24 +00:00
if ( length > = RUN_MASK ) {
size_t len = length - RUN_MASK ;
2017-03-07 21:30:54 +00:00
* token = ( RUN_MASK < < ML_BITS ) ;
for ( ; len > = 255 ; len - = 255 ) * ( * op ) + + = 255 ;
* ( * op ) + + = ( BYTE ) len ;
} else {
* token = ( BYTE ) ( length < < ML_BITS ) ;
}
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Copy Literals */
2014-11-29 19:19:39 +00:00
LZ4_wildCopy ( * op , * anchor , ( * op ) + length ) ;
* op + = length ;
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Encode Offset */
2014-11-29 19:19:39 +00:00
LZ4_writeLE16 ( * op , ( U16 ) ( * ip - match ) ) ; * op + = 2 ;
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Encode MatchLength */
2017-11-02 20:44:57 +00:00
assert ( matchLength > = MINMATCH ) ;
2017-03-07 21:30:54 +00:00
length = ( size_t ) ( matchLength - MINMATCH ) ;
if ( ( limit ) & & ( * op + ( length > > 8 ) + ( 1 + LASTLITERALS ) > oend ) ) return 1 ; /* Check output limit */
if ( length > = ML_MASK ) {
2016-09-03 02:32:06 +00:00
* token + = ML_MASK ;
length - = ML_MASK ;
2017-03-07 21:30:54 +00:00
for ( ; length > = 510 ; length - = 510 ) { * ( * op ) + + = 255 ; * ( * op ) + + = 255 ; }
if ( length > = 255 ) { length - = 255 ; * ( * op ) + + = 255 ; }
2016-09-03 02:32:06 +00:00
* ( * op ) + + = ( BYTE ) length ;
} else {
* token + = ( BYTE ) ( length ) ;
}
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* Prepare next loop */
2013-12-30 17:16:52 +00:00
* ip + = matchLength ;
* anchor = * ip ;
return 0 ;
}
2017-03-16 09:16:24 +00:00
/* btopt */
2016-12-06 14:21:28 +00:00
# include "lz4opt.h"
2013-12-30 17:16:52 +00:00
2017-03-16 09:16:24 +00:00
2016-12-06 14:21:28 +00:00
static int LZ4HC_compress_hashChain (
2016-11-12 15:29:54 +00:00
LZ4HC_CCtx_internal * const ctx ,
2016-11-08 03:32:24 +00:00
const char * const source ,
char * const dest ,
2017-03-08 08:11:15 +00:00
int * srcSizePtr ,
2016-11-08 03:32:24 +00:00
int const maxOutputSize ,
2016-12-07 11:16:33 +00:00
unsigned maxNbAttempts ,
2014-10-26 10:22:15 +00:00
limitedOutput_directive limit
)
2013-12-30 17:16:52 +00:00
{
2017-03-08 08:11:15 +00:00
const int inputSize = * srcSizePtr ;
2013-12-30 17:16:52 +00:00
const BYTE * ip = ( const BYTE * ) source ;
const BYTE * anchor = ip ;
const BYTE * const iend = ip + inputSize ;
const BYTE * const mflimit = iend - MFLIMIT ;
const BYTE * const matchlimit = ( iend - LASTLITERALS ) ;
2017-03-08 08:11:15 +00:00
BYTE * optr = ( BYTE * ) dest ;
2013-12-30 17:16:52 +00:00
BYTE * op = ( BYTE * ) dest ;
2017-03-08 15:49:55 +00:00
BYTE * oend = op + maxOutputSize ;
2013-12-30 17:16:52 +00:00
int ml , ml2 , ml3 , ml0 ;
2016-09-03 02:06:01 +00:00
const BYTE * ref = NULL ;
const BYTE * start2 = NULL ;
const BYTE * ref2 = NULL ;
const BYTE * start3 = NULL ;
const BYTE * ref3 = NULL ;
2013-12-30 17:16:52 +00:00
const BYTE * start0 ;
const BYTE * ref0 ;
2014-10-18 10:18:14 +00:00
/* init */
2017-03-08 08:11:15 +00:00
* srcSizePtr = 0 ;
2017-03-09 09:19:24 +00:00
if ( limit = = limitedDestSize & & maxOutputSize < 1 ) return 0 ; /* Impossible to store anything */
2017-03-08 08:11:15 +00:00
if ( ( U32 ) inputSize > ( U32 ) LZ4_MAX_INPUT_SIZE ) return 0 ; /* Unsupported input size, too large (or negative) */
2017-03-08 15:49:55 +00:00
if ( limit = = limitedDestSize ) oend - = LASTLITERALS ; /* Hack for support limitations LZ4 decompressor */
2017-03-08 08:11:15 +00:00
if ( inputSize < LZ4_minLength ) goto _last_literals ; /* Input too small, no compression (all literals) */
2013-12-30 17:16:52 +00:00
ip + + ;
2014-02-04 14:11:10 +00:00
/* Main Loop */
2016-09-03 02:06:01 +00:00
while ( ip < mflimit ) {
2014-02-04 14:11:10 +00:00
ml = LZ4HC_InsertAndFindBestMatch ( ctx , ip , matchlimit , ( & ref ) , maxNbAttempts ) ;
2017-10-09 06:40:21 +00:00
if ( ml < MINMATCH ) { ip + + ; continue ; }
2013-12-30 17:16:52 +00:00
2014-02-04 14:11:10 +00:00
/* saved, in case we would skip too much */
2013-12-30 17:16:52 +00:00
start0 = ip ;
ref0 = ref ;
ml0 = ml ;
_Search2 :
if ( ip + ml < mflimit )
2016-11-08 03:32:24 +00:00
ml2 = LZ4HC_InsertAndGetWiderMatch ( ctx , ip + ml - 2 , ip + 0 , matchlimit , ml , & ref2 , & start2 , maxNbAttempts ) ;
2017-03-08 15:49:55 +00:00
else
ml2 = ml ;
2013-12-30 17:16:52 +00:00
2016-09-03 02:06:01 +00:00
if ( ml2 = = ml ) { /* No better match */
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
continue ;
}
2016-09-03 02:06:01 +00:00
if ( start0 < ip ) {
if ( start2 < ip + ml0 ) { /* empirical */
2013-12-30 17:16:52 +00:00
ip = start0 ;
ref = ref0 ;
ml = ml0 ;
}
}
2014-02-04 14:11:10 +00:00
/* Here, start0==ip */
2016-09-03 02:06:01 +00:00
if ( ( start2 - ip ) < 3 ) { /* First Match too small : removed */
2013-12-30 17:16:52 +00:00
ml = ml2 ;
ip = start2 ;
ref = ref2 ;
goto _Search2 ;
}
_Search3 :
2017-03-16 09:16:24 +00:00
/* At this stage, we have :
* ml2 > ml1 , and
* ip1 + 3 < = ip2 ( usually < ip1 + ml1 ) */
2016-09-03 02:06:01 +00:00
if ( ( start2 - ip ) < OPTIMAL_ML ) {
2013-12-30 17:16:52 +00:00
int correction ;
int new_ml = ml ;
if ( new_ml > OPTIMAL_ML ) new_ml = OPTIMAL_ML ;
if ( ip + new_ml > start2 + ml2 - MINMATCH ) new_ml = ( int ) ( start2 - ip ) + ml2 - MINMATCH ;
correction = new_ml - ( int ) ( start2 - ip ) ;
2016-09-03 02:06:01 +00:00
if ( correction > 0 ) {
2013-12-30 17:16:52 +00:00
start2 + = correction ;
ref2 + = correction ;
ml2 - = correction ;
}
}
2014-02-04 14:11:10 +00:00
/* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
2013-12-30 17:16:52 +00:00
if ( start2 + ml2 < mflimit )
2014-02-04 14:11:10 +00:00
ml3 = LZ4HC_InsertAndGetWiderMatch ( ctx , start2 + ml2 - 3 , start2 , matchlimit , ml2 , & ref3 , & start3 , maxNbAttempts ) ;
2017-03-07 14:11:48 +00:00
else
ml3 = ml2 ;
2013-12-30 17:16:52 +00:00
2016-09-03 02:06:01 +00:00
if ( ml3 = = ml2 ) { /* No better match : 2 sequences to encode */
2014-02-04 14:11:10 +00:00
/* ip & ref are known; Now for ml */
2013-12-30 17:16:52 +00:00
if ( start2 < ip + ml ) ml = ( int ) ( start2 - ip ) ;
2014-02-04 14:11:10 +00:00
/* Now, encode 2 sequences */
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
ip = start2 ;
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml2 , ref2 , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
continue ;
}
2016-09-03 02:06:01 +00:00
if ( start3 < ip + ml + 3 ) { /* Not enough space for match 2 : remove it */
if ( start3 > = ( ip + ml ) ) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
if ( start2 < ip + ml ) {
2013-12-30 17:16:52 +00:00
int correction = ( int ) ( ip + ml - start2 ) ;
start2 + = correction ;
ref2 + = correction ;
ml2 - = correction ;
2016-09-03 02:06:01 +00:00
if ( ml2 < MINMATCH ) {
2013-12-30 17:16:52 +00:00
start2 = start3 ;
ref2 = ref3 ;
ml2 = ml3 ;
}
}
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
ip = start3 ;
ref = ref3 ;
ml = ml3 ;
start0 = start2 ;
ref0 = ref2 ;
ml0 = ml2 ;
goto _Search2 ;
}
start2 = start3 ;
ref2 = ref3 ;
ml2 = ml3 ;
goto _Search3 ;
}
2014-02-04 14:11:10 +00:00
/*
2014-10-26 10:22:15 +00:00
* OK , now we have 3 ascending matches ; let ' s write at least the first one
* ip & ref are known ; Now for ml
*/
2016-09-03 02:06:01 +00:00
if ( start2 < ip + ml ) {
if ( ( start2 - ip ) < ( int ) ML_MASK ) {
2013-12-30 17:16:52 +00:00
int correction ;
if ( ml > OPTIMAL_ML ) ml = OPTIMAL_ML ;
if ( ip + ml > start2 + ml2 - MINMATCH ) ml = ( int ) ( start2 - ip ) + ml2 - MINMATCH ;
correction = ml - ( int ) ( start2 - ip ) ;
2016-09-03 02:06:01 +00:00
if ( correction > 0 ) {
2013-12-30 17:16:52 +00:00
start2 + = correction ;
ref2 + = correction ;
ml2 - = correction ;
}
2016-09-03 02:06:01 +00:00
} else {
2013-12-30 17:16:52 +00:00
ml = ( int ) ( start2 - ip ) ;
}
}
2017-03-08 08:11:15 +00:00
optr = op ;
if ( LZ4HC_encodeSequence ( & ip , & op , & anchor , ml , ref , limit , oend ) ) goto _dest_overflow ;
2013-12-30 17:16:52 +00:00
ip = start2 ;
ref = ref2 ;
ml = ml2 ;
start2 = start3 ;
ref2 = ref3 ;
ml2 = ml3 ;
goto _Search3 ;
}
2017-03-08 08:11:15 +00:00
_last_literals :
2014-02-04 14:11:10 +00:00
/* Encode Last Literals */
2017-03-16 09:16:24 +00:00
{ size_t lastRunSize = ( size_t ) ( iend - anchor ) ; /* literals */
size_t litLength = ( lastRunSize + 255 - RUN_MASK ) / 255 ;
size_t const totalSize = 1 + litLength + lastRunSize ;
2017-03-08 15:49:55 +00:00
if ( limit = = limitedDestSize ) oend + = LASTLITERALS ; /* restore correct value */
2017-03-08 08:11:15 +00:00
if ( limit & & ( op + totalSize > oend ) ) {
if ( limit = = limitedOutput ) return 0 ; /* Check output limit */
2017-03-09 09:19:24 +00:00
/* adapt lastRunSize to fill 'dest' */
2017-03-08 08:11:15 +00:00
lastRunSize = ( size_t ) ( oend - op ) - 1 ;
litLength = ( lastRunSize + 255 - RUN_MASK ) / 255 ;
lastRunSize - = litLength ;
}
ip = anchor + lastRunSize ;
2017-03-07 14:11:48 +00:00
if ( lastRunSize > = RUN_MASK ) {
size_t accumulator = lastRunSize - RUN_MASK ;
* op + + = ( RUN_MASK < < ML_BITS ) ;
for ( ; accumulator > = 255 ; accumulator - = 255 ) * op + + = 255 ;
* op + + = ( BYTE ) accumulator ;
} else {
* op + + = ( BYTE ) ( lastRunSize < < ML_BITS ) ;
}
memcpy ( op , anchor , lastRunSize ) ;
op + = lastRunSize ;
2013-12-30 17:16:52 +00:00
}
2014-02-04 14:11:10 +00:00
/* End */
2017-03-08 08:11:15 +00:00
* srcSizePtr = ( int ) ( ( ( const char * ) ip ) - source ) ;
2013-12-30 17:16:52 +00:00
return ( int ) ( ( ( char * ) op ) - dest ) ;
2017-03-08 08:11:15 +00:00
_dest_overflow :
if ( limit = = limitedDestSize ) {
op = optr ; /* restore correct out pointer */
goto _last_literals ;
}
return 0 ;
2013-12-30 17:16:52 +00:00
}
2016-12-06 14:21:28 +00:00
static int LZ4HC_compress_generic (
LZ4HC_CCtx_internal * const ctx ,
2017-03-16 22:10:38 +00:00
const char * const src ,
char * const dst ,
int * const srcSizePtr ,
int const dstCapacity ,
int cLevel ,
2016-12-06 14:21:28 +00:00
limitedOutput_directive limit
)
{
2017-11-03 17:48:55 +00:00
ctx - > end + = * srcSizePtr ;
if ( cLevel < 1 ) cLevel = LZ4HC_CLEVEL_DEFAULT ; /* note : convention is different from lz4frame, maybe something to review */
2017-03-16 22:10:38 +00:00
if ( cLevel > 9 ) {
2017-03-16 22:41:30 +00:00
if ( limit = = limitedDestSize ) cLevel = 10 ;
2017-03-16 22:10:38 +00:00
switch ( cLevel ) {
2017-03-16 09:16:24 +00:00
case 10 :
2017-11-03 17:30:52 +00:00
return LZ4HC_compress_hashChain ( ctx , src , dst , srcSizePtr , dstCapacity , 1 < < 12 , limit ) ;
2017-03-16 09:16:24 +00:00
case 11 :
2017-11-03 17:30:52 +00:00
return LZ4HC_compress_optimal ( ctx , src , dst , * srcSizePtr , dstCapacity , limit , 512 , 128 , 0 ) ;
2016-12-06 18:11:53 +00:00
default :
2017-06-19 05:09:36 +00:00
/* fall-through */
2017-03-16 09:16:24 +00:00
case 12 :
2017-11-03 17:30:52 +00:00
return LZ4HC_compress_optimal ( ctx , src , dst , * srcSizePtr , dstCapacity , limit , 1 < < 13 , LZ4_OPT_NUM , 1 ) ;
2016-12-06 14:21:28 +00:00
}
}
2017-03-16 22:10:38 +00:00
return LZ4HC_compress_hashChain ( ctx , src , dst , srcSizePtr , dstCapacity , 1 < < ( cLevel - 1 ) , limit ) ; /* levels 1-9 */
2016-12-06 14:21:28 +00:00
}
2016-11-12 23:50:29 +00:00
/* Size of the state buffer required by LZ4_compress_HC_extStateHC(). */
int LZ4_sizeofStateHC(void)
{
    return sizeof(LZ4_streamHC_t);
}
2013-12-30 17:16:52 +00:00
2017-03-16 22:10:38 +00:00
int LZ4_compress_HC_extStateHC ( void * state , const char * src , char * dst , int srcSize , int dstCapacity , int compressionLevel )
2013-12-30 17:16:52 +00:00
{
2017-03-16 09:16:24 +00:00
LZ4HC_CCtx_internal * const ctx = & ( ( LZ4_streamHC_t * ) state ) - > internal_donotuse ;
2014-02-04 14:11:10 +00:00
if ( ( ( size_t ) ( state ) & ( sizeof ( void * ) - 1 ) ) ! = 0 ) return 0 ; /* Error : state is not aligned for pointers (32 or 64 bits) */
2016-11-11 21:00:02 +00:00
LZ4HC_init ( ctx , ( const BYTE * ) src ) ;
2017-03-16 22:10:38 +00:00
if ( dstCapacity < LZ4_compressBound ( srcSize ) )
return LZ4HC_compress_generic ( ctx , src , dst , & srcSize , dstCapacity , compressionLevel , limitedOutput ) ;
2015-04-11 11:28:09 +00:00
else
2017-03-16 22:10:38 +00:00
return LZ4HC_compress_generic ( ctx , src , dst , & srcSize , dstCapacity , compressionLevel , noLimit ) ;
2013-12-30 17:16:52 +00:00
}
2017-03-16 22:10:38 +00:00
int LZ4_compress_HC ( const char * src , char * dst , int srcSize , int dstCapacity , int compressionLevel )
2013-12-30 17:16:52 +00:00
{
2016-11-12 23:50:29 +00:00
# if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
2016-11-13 01:14:57 +00:00
LZ4_streamHC_t * const statePtr = ( LZ4_streamHC_t * ) malloc ( sizeof ( LZ4_streamHC_t ) ) ;
2015-10-21 14:00:48 +00:00
# else
2016-11-11 21:00:02 +00:00
LZ4_streamHC_t state ;
LZ4_streamHC_t * const statePtr = & state ;
2015-10-21 14:00:48 +00:00
# endif
2017-03-16 22:10:38 +00:00
int const cSize = LZ4_compress_HC_extStateHC ( statePtr , src , dst , srcSize , dstCapacity , compressionLevel ) ;
2016-11-12 23:50:29 +00:00
# if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
2015-10-21 14:00:48 +00:00
free ( statePtr ) ;
# endif
return cSize ;
2013-12-30 17:16:52 +00:00
}
2017-03-16 22:10:38 +00:00
/* LZ4_compress_HC_destSize() :
2017-11-03 09:01:20 +00:00
* only compatible with Hash Chain match finder */
2017-03-16 22:10:38 +00:00
int LZ4_compress_HC_destSize ( void * LZ4HC_Data , const char * source , char * dest , int * sourceSizePtr , int targetDestSize , int cLevel )
{
LZ4HC_CCtx_internal * const ctx = & ( ( LZ4_streamHC_t * ) LZ4HC_Data ) - > internal_donotuse ;
LZ4HC_init ( ctx , ( const BYTE * ) source ) ;
return LZ4HC_compress_generic ( ctx , source , dest , sourceSizePtr , targetDestSize , cLevel , limitedDestSize ) ;
}
2013-12-30 17:16:52 +00:00
2014-12-03 18:17:10 +00:00
2014-10-18 10:18:14 +00:00
/**************************************
2015-05-02 14:44:43 +00:00
* Streaming Functions
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2014-10-18 10:18:14 +00:00
/* allocation */
LZ4_streamHC_t * LZ4_createStreamHC ( void ) { return ( LZ4_streamHC_t * ) malloc ( sizeof ( LZ4_streamHC_t ) ) ; }
2017-08-09 23:51:19 +00:00
/* Release a stream created by LZ4_createStreamHC(). Always returns 0. */
int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
{
    if (LZ4_streamHCPtr == NULL) return 0;   /* support free on NULL */
    free(LZ4_streamHCPtr);
    return 0;
}
/* initialization */
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
    LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET);
    /* base==NULL marks the stream as "needs init on next use" */
    LZ4_streamHCPtr->internal_donotuse.base = NULL;
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
/* Store `compressionLevel` into the stream state,
 * clamped into the valid range [1, LZ4HC_CLEVEL_MAX]. */
void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    int clamped = compressionLevel;
    if (clamped < 1) clamped = 1;
    if (clamped > LZ4HC_CLEVEL_MAX) clamped = LZ4HC_CLEVEL_MAX;
    LZ4_streamHCPtr->internal_donotuse.compressionLevel = clamped;
}
/* LZ4_loadDictHC() :
 * Prime the stream with a dictionary.
 * Only the last 64 KB matter (maximum match distance).
 * @return : effective dictionary size loaded. */
int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    if (dictSize > 64 KB) {
        /* keep only the usable tail */
        dictionary += dictSize - 64 KB;
        dictSize = 64 KB;
    }
    LZ4HC_init(ctxPtr, (const BYTE*)dictionary);
    ctxPtr->end = (const BYTE*)dictionary + dictSize;
    if (dictSize >= 4) LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);   /* index dictionary content */
    return dictSize;
}
/* compression */
2016-11-12 15:29:54 +00:00
static void LZ4HC_setExternalDict ( LZ4HC_CCtx_internal * ctxPtr , const BYTE * newBlock )
2014-12-03 18:17:10 +00:00
{
2017-11-03 08:18:12 +00:00
if ( ctxPtr - > end > = ctxPtr - > base + 4 ) LZ4HC_Insert ( ctxPtr , ctxPtr - > end - 3 ) ; /* Referencing remaining dictionary content */
2016-12-09 16:16:35 +00:00
2014-12-03 18:17:10 +00:00
/* Only one memory segment for extDict, so any previous extDict is lost at this stage */
ctxPtr - > lowLimit = ctxPtr - > dictLimit ;
ctxPtr - > dictLimit = ( U32 ) ( ctxPtr - > end - ctxPtr - > base ) ;
ctxPtr - > dictBase = ctxPtr - > base ;
ctxPtr - > base = newBlock - ctxPtr - > dictLimit ;
ctxPtr - > end = newBlock ;
2016-12-28 14:38:59 +00:00
ctxPtr - > nextToUpdate = ctxPtr - > dictLimit ; /* match referencing will resume from there */
2014-12-03 18:17:10 +00:00
}
2016-11-11 21:00:02 +00:00
/* LZ4_compressHC_continue_generic() :
 * Shared driver for all streaming HC entry points.
 * Handles lazy init, index-overflow rescue, non-contiguous blocks,
 * and input overlapping the dictionary, then dispatches to the core compressor. */
static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                            const char* src, char* dst,
                                            int* srcSizePtr, int dstCapacity,
                                            limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* const ctx = &LZ4_streamHCPtr->internal_donotuse;

    /* auto-init if forgotten */
    if (ctx->base == NULL) LZ4HC_init(ctx, (const BYTE*)src);

    /* Check overflow : rescue indexes by reloading the last 64 KB as a dictionary */
    if ((size_t)(ctx->end - ctx->base) > 2 GB) {
        size_t dictSize = (size_t)(ctx->end - ctx->base) - ctx->dictLimit;
        if (dictSize > 64 KB) dictSize = 64 KB;
        LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctx->end) - dictSize, (int)dictSize);
    }

    /* Check if blocks follow each other */
    if ((const BYTE*)src != ctx->end) LZ4HC_setExternalDict(ctx, (const BYTE*)src);

    /* Check overlapping input/dictionary space : shrink the dictionary window if needed */
    {   const BYTE* srcEnd = (const BYTE*)src + *srcSizePtr;
        const BYTE* const dictBegin = ctx->dictBase + ctx->lowLimit;
        const BYTE* const dictEnd   = ctx->dictBase + ctx->dictLimit;
        if ((srcEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
            if (srcEnd > dictEnd) srcEnd = dictEnd;
            ctx->lowLimit = (U32)(srcEnd - ctx->dictBase);
            /* a dictionary smaller than a match minimum is useless : drop it */
            if (ctx->dictLimit - ctx->lowLimit < 4) ctx->lowLimit = ctx->dictLimit;
        }
    }

    return LZ4HC_compress_generic(ctx, src, dst, srcSizePtr, dstCapacity, ctx->compressionLevel, limit);
}
/* Streaming HC compression of one block.
 * Output checks are skipped when dst is provably large enough. */
int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
{
    if (dstCapacity < LZ4_compressBound(srcSize))
        return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, noLimit);
}
/* Streaming variant of LZ4_compress_HC_destSize() :
 * fills dst up to targetDestSize, updating *srcSizePtr with bytes consumed. */
int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
{
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, limitedDestSize);
}
/* dictionary saving */
/* LZ4_saveDictHC() :
 * Copy the last `dictSize` bytes of the current prefix into `safeBuffer`,
 * then rebase the stream on that buffer so streaming can continue
 * after the caller reuses the previous input buffer.
 * @return : effective dictionary size saved (0..64 KB). */
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
    if (dictSize > 64 KB) dictSize = 64 KB;
    if (dictSize < 4) dictSize = 0;            /* too small to seed any match */
    if (dictSize > prefixSize) dictSize = prefixSize;
    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);   /* regions may overlap */
    {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
        streamPtr->end  = (const BYTE*)safeBuffer + dictSize;
        streamPtr->base = streamPtr->end - endIndex;   /* keep table indexes valid across the move */
        streamPtr->dictLimit = endIndex - dictSize;
        streamPtr->lowLimit  = endIndex - dictSize;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
/***********************************
*  Deprecated Functions
***********************************/
/* These functions currently generate deprecation warnings */

/* Deprecated compression functions : thin forwards to the LZ4_compress_HC*() API.
 * cLevel==0 selects the default compression level. */
int LZ4_compressHC(const char* src, char* dst, int srcSize)
{
    return LZ4_compress_HC(src, dst, srcSize, LZ4_compressBound(srcSize), 0);
}
int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize)
{
    return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0);
}
int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel)
{
    return LZ4_compress_HC(src, dst, srcSize, LZ4_compressBound(srcSize), cLevel);
}
int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel)
{
    return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel);
}
int LZ4_compressHC_withStateHC(void* state, const char* src, char* dst, int srcSize)
{
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), 0);
}
int LZ4_compressHC_limitedOutput_withStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize)
{
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, 0);
}
int LZ4_compressHC2_withStateHC(void* state, const char* src, char* dst, int srcSize, int cLevel)
{
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel);
}
int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel)
{
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel);
}
int LZ4_compressHC_continue(LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize)
{
    return LZ4_compress_HC_continue(ctx, src, dst, srcSize, LZ4_compressBound(srcSize));
}
int LZ4_compressHC_limitedOutput_continue(LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize)
{
    return LZ4_compress_HC_continue(ctx, src, dst, srcSize, maxDstSize);
}
/* Deprecated streaming functions */
2014-10-18 10:18:14 +00:00
int LZ4_sizeofStreamStateHC ( void ) { return LZ4_STREAMHCSIZE ; }
2015-05-06 00:58:24 +00:00
int LZ4_resetStreamStateHC ( void * state , char * inputBuffer )
2014-10-18 10:18:14 +00:00
{
2016-11-12 15:29:54 +00:00
LZ4HC_CCtx_internal * ctx = & ( ( LZ4_streamHC_t * ) state ) - > internal_donotuse ;
2014-10-18 10:18:14 +00:00
if ( ( ( ( size_t ) state ) & ( sizeof ( void * ) - 1 ) ) ! = 0 ) return 1 ; /* Error : pointer is not aligned for pointer (32 or 64 bits) */
2016-11-11 21:00:02 +00:00
LZ4HC_init ( ctx , ( const BYTE * ) inputBuffer ) ;
ctx - > inputBuffer = ( BYTE * ) inputBuffer ;
2014-10-18 10:18:14 +00:00
return 0 ;
}
2015-05-06 00:58:24 +00:00
void * LZ4_createHC ( char * inputBuffer )
2014-10-18 10:18:14 +00:00
{
2016-11-11 21:00:02 +00:00
LZ4_streamHC_t * hc4 = ( LZ4_streamHC_t * ) ALLOCATOR ( 1 , sizeof ( LZ4_streamHC_t ) ) ;
2015-06-29 03:51:11 +00:00
if ( hc4 = = NULL ) return NULL ; /* not enough memory */
2016-11-11 21:00:02 +00:00
LZ4HC_init ( & hc4 - > internal_donotuse , ( const BYTE * ) inputBuffer ) ;
hc4 - > internal_donotuse . inputBuffer = ( BYTE * ) inputBuffer ;
2014-10-18 10:18:14 +00:00
return hc4 ;
}
2017-08-09 23:51:19 +00:00
int LZ4_freeHC ( void * LZ4HC_Data ) {
if ( ! LZ4HC_Data ) return 0 ; /* support free on NULL */
FREEMEM ( LZ4HC_Data ) ;
return 0 ;
}
2014-10-18 10:18:14 +00:00
2017-03-16 22:10:38 +00:00
int LZ4_compressHC2_continue ( void * LZ4HC_Data , const char * src , char * dst , int srcSize , int cLevel )
2014-10-18 10:18:14 +00:00
{
2017-03-16 22:10:38 +00:00
return LZ4HC_compress_generic ( & ( ( LZ4_streamHC_t * ) LZ4HC_Data ) - > internal_donotuse , src , dst , & srcSize , 0 , cLevel , noLimit ) ;
2014-10-18 10:18:14 +00:00
}
2013-12-30 17:16:52 +00:00
2017-03-16 22:10:38 +00:00
int LZ4_compressHC2_limitedOutput_continue ( void * LZ4HC_Data , const char * src , char * dst , int srcSize , int dstCapacity , int cLevel )
2014-02-04 14:11:10 +00:00
{
2017-03-16 22:10:38 +00:00
return LZ4HC_compress_generic ( & ( ( LZ4_streamHC_t * ) LZ4HC_Data ) - > internal_donotuse , src , dst , & srcSize , dstCapacity , cLevel , limitedOutput ) ;
2014-02-04 14:11:10 +00:00
}
2014-10-18 10:18:14 +00:00
char * LZ4_slideInputBufferHC ( void * LZ4HC_Data )
{
2016-11-12 23:50:29 +00:00
LZ4HC_CCtx_internal * const hc4 = & ( ( LZ4_streamHC_t * ) LZ4HC_Data ) - > internal_donotuse ;
int const dictSize = LZ4_saveDictHC ( ( LZ4_streamHC_t * ) LZ4HC_Data , ( char * ) ( hc4 - > inputBuffer ) , 64 KB ) ;
2014-12-03 18:17:10 +00:00
return ( char * ) ( hc4 - > inputBuffer + dictSize ) ;
2014-10-18 10:18:14 +00:00
}