/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* ***************************************************************
*  Tuning parameters
*****************************************************************/
/*!
 * HEAPMODE :
 * Select how default decompression function ZSTD_decompress() allocates its context,
 * on stack (0), or into heap (1, default; requires malloc()).
 * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
 */
#ifndef ZSTD_HEAPMODE
#  define ZSTD_HEAPMODE 1
#endif
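
/* Illustrative sketch (not part of the library) : the setting above only matters
 * for the parameterless entry point.  ZSTD_decompress() creates and releases its
 * own context (on heap when ZSTD_HEAPMODE==1), while a caller who wants full
 * control can manage the context explicitly, e.g. :
 *
 *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *     size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
 *     if (ZSTD_isError(dSize)) { ... }    // handle error
 *     ZSTD_freeDCtx(dctx);
 *
 * The explicit-context path is unaffected by ZSTD_HEAPMODE. */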

/*!
 *  LEGACY_SUPPORT :
 *  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
 */
#ifndef ZSTD_LEGACY_SUPPORT
#  define ZSTD_LEGACY_SUPPORT 0
#endif

/*!
 *  MAXWINDOWSIZE_DEFAULT :
 *  maximum window size accepted by DStream __by default__.
 *  Frames requiring more memory will be rejected.
 *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
 */
#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
#endif
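
/* Illustrative sketch (not part of the library) : a caller that must accept
 * frames produced with a larger window (e.g. long-distance matching) can raise
 * the limit on its own context through the advanced/experimental API :
 *
 *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *     ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 31);   // example value : accept windows up to 2 GB
 *
 * The window size used here is only an example; the default below stays in
 * effect for contexts that do not override it. */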
/*!
 *  NO_FORWARD_PROGRESS_MAX :
 *  maximum allowed nb of calls to ZSTD_decompressStream() and ZSTD_decompress_generic()
 *  without any forward progress
 *  (defined as: no byte read from input, and no byte flushed to output)
 *  before triggering an error.
 */
#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
#endif
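
/* Illustrative sketch (not part of the library) : a typical streaming loop that
 * never triggers the no-forward-progress protection, because it always offers a
 * drained output buffer and checks error codes.  `read_output` stands for any
 * caller-side consumption of the produced bytes (hypothetical helper) :
 *
 *     ZSTD_inBuffer  input  = { src, srcSize, 0 };
 *     ZSTD_outBuffer output = { dst, dstCapacity, 0 };
 *     while (input.pos < input.size) {
 *         output.pos = 0;                                   // make room before each call
 *         size_t const ret = ZSTD_decompressStream(zds, &output, &input);
 *         if (ZSTD_isError(ret)) { ... }                    // e.g. dstSize_tooSmall, srcSize_wrong
 *         read_output(dst, output.pos);                     // consume what was flushed
 *     }
 *
 * A caller that keeps presenting a full output buffer, or an empty input buffer,
 * will instead receive an error after ZSTD_NO_FORWARD_PROGRESS_MAX attempts. */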


/*-*******************************************************
*  Dependencies
*********************************************************/
#include <string.h>      /* memcpy, memmove, memset */
#include "cpu.h"         /* prefetch */
#include "mem.h"         /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "zstd_internal.h"

#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
#  include "zstd_legacy.h"
#endif

static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);

/*-*************************************
*  Errors
***************************************/
#define ZSTD_isError ERR_isError   /* for inlining */
#define FSE_isError  ERR_isError
#define HUF_isError  ERR_isError


/*_*******************************************************
*  Memory operations
**********************************************************/
static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }

/*-*************************************************************
*   Context management
***************************************************************/
typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;

typedef enum { zdss_init=0, zdss_loadHeader,
               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;

typedef struct {
    U32 fastMode;
    U32 tableLog;
} ZSTD_seqSymbol_header;

typedef struct {
    U16  nextState;
    BYTE nbAdditionalBits;
    BYTE nbBits;
    U32  baseValue;
} ZSTD_seqSymbol;

#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))

typedef struct {
    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
    U32 rep[ZSTD_REP_NUM];
} ZSTD_entropyDTables_t;

struct ZSTD_DCtx_s
{
    const ZSTD_seqSymbol* LLTptr;
    const ZSTD_seqSymbol* MLTptr;
    const ZSTD_seqSymbol* OFTptr;
    const HUF_DTable* HUFptr;
    ZSTD_entropyDTables_t entropy;
    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
    const void* previousDstEnd;   /* detect continuity */
    const void* prefixStart;      /* start of current segment */
    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
    const void* dictEnd;          /* end of previous segment */
    size_t expected;
    ZSTD_frameHeader fParams;
    U64 decodedSize;
    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
    ZSTD_dStage stage;
    U32 litEntropy;
    U32 fseEntropy;
    XXH64_state_t xxhState;
    size_t headerSize;
    ZSTD_format_e format;
    const BYTE* litPtr;
    ZSTD_customMem customMem;
    size_t litSize;
    size_t rleSize;
    size_t staticSize;
    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */

    /* dictionary */
    ZSTD_DDict* ddictLocal;
    const ZSTD_DDict* ddict;      /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
    U32 dictID;
    int ddictIsCold;              /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */

    /* streaming */
    ZSTD_dStreamStage streamStage;
    char*  inBuff;
    size_t inBuffSize;
    size_t inPos;
    size_t maxWindowSize;
    char*  outBuff;
    size_t outBuffSize;
    size_t outStart;
    size_t outEnd;
    size_t lhSize;
    void* legacyContext;
    U32 previousLegacyVersion;
    U32 legacyVersion;
    U32 hostageByte;
    int noForwardProgress;

    /* workspace */
    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
};  /* typedef'd to ZSTD_DCtx within "zstd.h" */

size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
{
    if (dctx==NULL) return 0;   /* support sizeof NULL */
    return sizeof(*dctx)
           + ZSTD_sizeof_DDict(dctx->ddictLocal)
           + dctx->inBuffSize + dctx->outBuffSize;
}

size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }

static size_t ZSTD_startingInputLength(ZSTD_format_e format)
{
    size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
                    ZSTD_frameHeaderSize_prefix - ZSTD_FRAMEIDSIZE :
                    ZSTD_frameHeaderSize_prefix;
    ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
    return startingInputLength;
}

static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
{
    dctx->format = ZSTD_f_zstd1;  /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
    dctx->staticSize  = 0;
    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
    dctx->ddict       = NULL;
    dctx->ddictLocal  = NULL;
    dctx->ddictIsCold = 0;
    dctx->inBuff      = NULL;
    dctx->inBuffSize  = 0;
    dctx->outBuffSize = 0;
    dctx->streamStage = zdss_init;
    dctx->legacyContext = NULL;
    dctx->previousLegacyVersion = 0;
    dctx->noForwardProgress = 0;
    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
}

ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
{
    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;

    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */

    ZSTD_initDCtx_internal(dctx);
    dctx->staticSize = workspaceSize;
    dctx->inBuff = (char*)(dctx+1);
    return dctx;
}
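
/* Illustrative sketch (not part of the library) : allocation-free usage of a
 * DCtx placed in caller-provided memory.  The workspace must be 8-byte aligned
 * and at least sizeof(ZSTD_DCtx) bytes, as checked above; the size used here is
 * only an example, ZSTD_estimateDCtxSize() gives the exact requirement :
 *
 *     static size_t workspace[(1 << 18) / sizeof(size_t)];    // size_t array to guarantee alignment
 *     ZSTD_DCtx* const dctx = ZSTD_initStaticDCtx(workspace, sizeof(workspace));
 *     if (dctx == NULL) { ... }                               // workspace too small or misaligned
 *     size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
 *
 * No ZSTD_freeDCtx() is needed (it would report an error) : the memory belongs
 * to the caller. */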

ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
{
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
        if (!dctx) return NULL;
        dctx->customMem = customMem;
        ZSTD_initDCtx_internal(dctx);
        return dctx;
    }
}

ZSTD_DCtx* ZSTD_createDCtx(void)
{
    DEBUGLOG(3, "ZSTD_createDCtx");
    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
}

size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
    if (dctx==NULL) return 0;   /* support free on NULL */
    if (dctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static DCtx */
    {   ZSTD_customMem const cMem = dctx->customMem;
        ZSTD_freeDDict(dctx->ddictLocal);
        dctx->ddictLocal = NULL;
        ZSTD_free(dctx->inBuff, cMem);
        dctx->inBuff = NULL;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
        if (dctx->legacyContext)
            ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
#endif
        ZSTD_free(dctx, cMem);
        return 0;
    }
}

/* no longer useful */
void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
{
    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
    memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
}


/*-*************************************************************
 *   Frame header decoding
 ***************************************************************/

/*! ZSTD_isFrame() :
 *  Tells if the content of `buffer` starts with a valid Frame Identifier.
 *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
 *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
 *  Note 3 : Skippable Frame Identifiers are considered valid. */
unsigned ZSTD_isFrame(const void* buffer, size_t size)
{
    if (size < ZSTD_FRAMEIDSIZE) return 0;
    {   U32 const magic = MEM_readLE32(buffer);
        if (magic == ZSTD_MAGICNUMBER) return 1;
        if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
    }
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
    if (ZSTD_isLegacy(buffer, size)) return 1;
#endif
    return 0;
}

/** ZSTD_frameHeaderSize_internal() :
 *  srcSize must be large enough to reach header size fields.
 *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
 * @return : size of the Frame Header
 *           or an error code, which can be tested with ZSTD_isError() */
static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
    size_t const minInputSize = ZSTD_startingInputLength(format);

    if (srcSize < minInputSize) return ERROR(srcSize_wrong);
    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
        U32 const dictID= fhd & 3;
        U32 const singleSegment = (fhd >> 5) & 1;
        U32 const fcsId = fhd >> 6;
        return minInputSize + !singleSegment
             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
             + (singleSegment && !fcsId);
    }
}

/** ZSTD_frameHeaderSize() :
 *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
 * @return : size of the Frame Header,
 *           or an error code (if srcSize is too small) */
size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
{
    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
}


/** ZSTD_getFrameHeader_advanced() :
 *  decode Frame Header, or require larger `srcSize`.
 *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
 * @return : 0, `zfhPtr` is correctly filled,
 *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
 *           or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
    const BYTE* ip = (const BYTE*)src;
    size_t const minInputSize = ZSTD_startingInputLength(format);

    memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since they are 2 different signals */
    if (srcSize < minInputSize) return minInputSize;
    if (src==NULL) return ERROR(GENERIC);   /* invalid parameter */

    if ( (format != ZSTD_f_zstd1_magicless)
      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
            /* skippable frame */
            if (srcSize < ZSTD_skippableHeaderSize)
                return ZSTD_skippableHeaderSize; /* magic number + frame length */
            memset(zfhPtr, 0, sizeof(*zfhPtr));
            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
            zfhPtr->frameType = ZSTD_skippableFrame;
            return 0;
        }
        return ERROR(prefix_unknown);
    }

    /* ensure there is enough `srcSize` to fully read/decode frame header */
    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
        if (srcSize < fhsize) return fhsize;
        zfhPtr->headerSize = (U32)fhsize;
    }

    {   BYTE const fhdByte = ip[minInputSize-1];
        size_t pos = minInputSize;
        U32 const dictIDSizeCode = fhdByte&3;
        U32 const checksumFlag = (fhdByte>>2)&1;
        U32 const singleSegment = (fhdByte>>5)&1;
        U32 const fcsID = fhdByte>>6;
        U64 windowSize = 0;
        U32 dictID = 0;
        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
        if ((fhdByte & 0x08) != 0)
            return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */

        if (!singleSegment) {
            BYTE const wlByte = ip[pos++];
            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
            if (windowLog > ZSTD_WINDOWLOG_MAX)
                return ERROR(frameParameter_windowTooLarge);
            windowSize = (1ULL << windowLog);
            windowSize += (windowSize >> 3) * (wlByte&7);
        }
        switch(dictIDSizeCode)
        {
            default: assert(0);  /* impossible */
            case 0 : break;
            case 1 : dictID = ip[pos]; pos++; break;
            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
        }
        switch(fcsID)
        {
            default: assert(0);  /* impossible */
            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
        }
        if (singleSegment) windowSize = frameContentSize;

        zfhPtr->frameType = ZSTD_frame;
        zfhPtr->frameContentSize = frameContentSize;
        zfhPtr->windowSize = windowSize;
        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
        zfhPtr->dictID = dictID;
        zfhPtr->checksumFlag = checksumFlag;
    }
    return 0;
}

/** ZSTD_getFrameHeader() :
 *  decode Frame Header, or require larger `srcSize`.
 *  note : this function does not consume input, it only reads it.
 * @return : 0, `zfhPtr` is correctly filled,
 *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
 *           or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
{
    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}
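
/* Illustrative sketch (not part of the library) : because a return value > 0
 * means "need more input", a caller reading from a stream can grow its buffer
 * until the header is fully decodable.  `read_more` is a hypothetical I/O
 * helper standing in for whatever the application uses :
 *
 *     ZSTD_frameHeader zfh;
 *     size_t loaded = 0, needed = ZSTD_frameHeaderSize_prefix;
 *     for (;;) {
 *         loaded += read_more(buffer + loaded, needed - loaded);
 *         size_t const ret = ZSTD_getFrameHeader(&zfh, buffer, loaded);
 *         if (ZSTD_isError(ret)) { ... }   // not a valid frame header
 *         if (ret == 0) break;             // zfh is now filled
 *         needed = ret;                    // ret is the total srcSize wanted
 *     }
 */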

/** ZSTD_getFrameContentSize() :
 *  compatible with legacy mode
 * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
 *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
 *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
    if (ZSTD_isLegacy(src, srcSize)) {
        unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
        return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
    }
#endif
    {   ZSTD_frameHeader zfh;
        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
            return ZSTD_CONTENTSIZE_ERROR;
        if (zfh.frameType == ZSTD_skippableFrame) {
            return 0;
        } else {
            return zfh.frameContentSize;
    }   }
}
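
/* Illustrative sketch (not part of the library) : the two special return values
 * are both "large", so callers must test them explicitly rather than rely on a
 * generic error check :
 *
 *     unsigned long long const size = ZSTD_getFrameContentSize(src, srcSize);
 *     if (size == ZSTD_CONTENTSIZE_ERROR)        { ... }   // not a valid zstd frame header
 *     else if (size == ZSTD_CONTENTSIZE_UNKNOWN) { ... }   // valid frame, size not stored : use streaming decompression
 *     else                                       { ... }   // exact decompressed size : a destination buffer can be sized
 */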

/** ZSTD_findDecompressedSize() :
 *  compatible with legacy mode
 *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
 *      skippable frames
 * @return : decompressed size of the frames contained */
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
    unsigned long long totalDstSize = 0;

    while (srcSize >= ZSTD_frameHeaderSize_prefix) {
        U32 const magicNumber = MEM_readLE32(src);

        if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
            size_t skippableSize;
            if (srcSize < ZSTD_skippableHeaderSize)
                return ERROR(srcSize_wrong);
            skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_FRAMEIDSIZE)
                          + ZSTD_skippableHeaderSize;
            if (srcSize < skippableSize) {
                return ZSTD_CONTENTSIZE_ERROR;
            }

            src = (const BYTE *)src + skippableSize;
            srcSize -= skippableSize;
            continue;
        }

        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;

            /* check for overflow */
            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
            totalDstSize += ret;
        }
        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
            if (ZSTD_isError(frameSrcSize)) {
                return ZSTD_CONTENTSIZE_ERROR;
            }

            src = (const BYTE *)src + frameSrcSize;
            srcSize -= frameSrcSize;
        }
    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */

    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;

    return totalDstSize;
}

/** ZSTD_getDecompressedSize() :
 *  compatible with legacy mode
 * @return : decompressed size if known, 0 otherwise
             note : 0 can mean any of the following :
                  - frame content is empty
                  - decompressed size field is not present in frame header
                  - frame header unknown / not supported
                  - frame header not complete (`srcSize` too small) */
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
{
    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
}


/** ZSTD_decodeFrameHeader() :
 *  `headerSize` must be the size provided by ZSTD_frameHeaderSize().
 * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
{
    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
    if (ZSTD_isError(result)) return result;    /* invalid header */
    if (result>0) return ERROR(srcSize_wrong);  /* headerSize too small */
    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
        return ERROR(dictionary_wrong);
    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
    return 0;
}


/*-*************************************************************
 *   Block decoding
 ***************************************************************/

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr)
{
    if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
    {   U32 const cBlockHeader = MEM_readLE24(src);
        U32 const cSize = cBlockHeader >> 3;
        bpPtr->lastBlock = cBlockHeader & 1;
        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
        bpPtr->origSize = cSize;   /* only useful for RLE */
        if (bpPtr->blockType == bt_rle) return 1;
        if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
        return cSize;
    }
}
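
/* Illustrative sketch (not part of the library) : how a block header feeds a
 * decoding loop.  Each block starts with a 3-byte little-endian header holding
 * lastBlock (1 bit), blockType (2 bits) and the size field (remaining bits) :
 *
 *     blockProperties_t bp;
 *     size_t const cBlockSize = ZSTD_getcBlockSize(ip, remaining, &bp);
 *     if (ZSTD_isError(cBlockSize)) { ... }
 *     ip += ZSTD_blockHeaderSize;            // skip the 3-byte header
 *     ...                                    // decode cBlockSize bytes according to bp.blockType
 *     if (bp.lastBlock) { ... }              // end of frame (a checksum may follow)
 */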

static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    if (dst==NULL) return ERROR(dstSize_tooSmall);
    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
    memcpy(dst, src, srcSize);
    return srcSize;
}

static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                               size_t regenSize)
{
    if (srcSize != 1) return ERROR(srcSize_wrong);
    if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
    memset(dst, *(const BYTE*)src, regenSize);
    return regenSize;
}

/*! ZSTD_decodeLiteralsBlock() :
 * @return : nb of bytes read from src (< srcSize )
 *  note : symbol not declared but exposed for fullbench */
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
{
    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);

    {   const BYTE* const istart = (const BYTE*) src;
        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);

        switch(litEncType)
        {
        case set_repeat:
            if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
            /* fall-through */

        case set_compressed:
            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
            {   size_t lhSize, litSize, litCSize;
                U32 singleStream=0;
                U32 const lhlCode = (istart[0] >> 2) & 3;
                U32 const lhc = MEM_readLE32(istart);
                switch(lhlCode)
                {
                case 0: case 1: default:   /* note : default is impossible, since lhlCode into [0..3] */
                    /* 2 - 2 - 10 - 10 */
                    singleStream = !lhlCode;
                    lhSize = 3;
                    litSize  = (lhc >> 4) & 0x3FF;
                    litCSize = (lhc >> 14) & 0x3FF;
                    break;
                case 2:
                    /* 2 - 2 - 14 - 14 */
                    lhSize = 4;
                    litSize  = (lhc >> 4) & 0x3FFF;
                    litCSize = lhc >> 18;
                    break;
                case 3:
                    /* 2 - 2 - 18 - 18 */
                    lhSize = 5;
                    litSize  = (lhc >> 4) & 0x3FFFF;
                    litCSize = (lhc >> 22) + (istart[4] << 10);
                    break;
                }
                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
                if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);

                /* prefetch huffman table if cold */
                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
                }

                if (HUF_isError((litEncType==set_repeat) ?
                                    ( singleStream ?
                                        HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
                                        HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
                                    ( singleStream ?
                                        HUF_decompress1X1_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
                                                                         dctx->workspace, sizeof(dctx->workspace), dctx->bmi2) :
                                        HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
                                                                           dctx->workspace, sizeof(dctx->workspace), dctx->bmi2))))
                    return ERROR(corruption_detected);

                dctx->litPtr = dctx->litBuffer;
                dctx->litSize = litSize;
                dctx->litEntropy = 1;
                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
                return litCSize + lhSize;
            }

        case set_basic:
            {   size_t litSize, lhSize;
                U32 const lhlCode = ((istart[0]) >> 2) & 3;
                switch(lhlCode)
                {
                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
                    lhSize = 1;
                    litSize = istart[0] >> 3;
                    break;
                case 1:
                    lhSize = 2;
                    litSize = MEM_readLE16(istart) >> 4;
                    break;
                case 3:
                    lhSize = 3;
                    litSize = MEM_readLE24(istart) >> 4;
                    break;
                }

                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
                    if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
                    dctx->litPtr = dctx->litBuffer;
                    dctx->litSize = litSize;
                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
                    return lhSize+litSize;
                }
                /* direct reference into compressed stream */
                dctx->litPtr = istart+lhSize;
                dctx->litSize = litSize;
                return lhSize+litSize;
            }

        case set_rle:
            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
                size_t litSize, lhSize;
                switch(lhlCode)
                {
                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
                    lhSize = 1;
                    litSize = istart[0] >> 3;
                    break;
                case 1:
                    lhSize = 2;
                    litSize = MEM_readLE16(istart) >> 4;
                    break;
                case 3:
                    lhSize = 3;
                    litSize = MEM_readLE24(istart) >> 4;
                    if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
                    break;
                }
                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
                dctx->litPtr = dctx->litBuffer;
                dctx->litSize = litSize;
                return lhSize+1;
            }
        default:
            return ERROR(corruption_detected);   /* impossible */
        }
    }
}

/* Default FSE distribution tables.
 * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
 * They were generated programmatically with following method :
 * - start from default distributions, present in /lib/common/zstd_internal.h
 * - generate tables normally, using ZSTD_buildFSETable()
 * - printout the content of tables
 * - prettify output, report below, test with fuzzer to ensure it's correct */

/* Default FSE distribution table for Literal Lengths */
static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
     /* nextState, nbAddBits, nbBits, baseVal */
{ 0 , 0 , 4 , 0 } , { 16 , 0 , 4 , 0 } ,
{ 32 , 0 , 5 , 1 } , { 0 , 0 , 5 , 3 } ,
{ 0 , 0 , 5 , 4 } , { 0 , 0 , 5 , 6 } ,
{ 0 , 0 , 5 , 7 } , { 0 , 0 , 5 , 9 } ,
{ 0 , 0 , 5 , 10 } , { 0 , 0 , 5 , 12 } ,
{ 0 , 0 , 6 , 14 } , { 0 , 1 , 5 , 16 } ,
{ 0 , 1 , 5 , 20 } , { 0 , 1 , 5 , 22 } ,
{ 0 , 2 , 5 , 28 } , { 0 , 3 , 5 , 32 } ,
{ 0 , 4 , 5 , 48 } , { 32 , 6 , 5 , 64 } ,
{ 0 , 7 , 5 , 128 } , { 0 , 8 , 6 , 256 } ,
{ 0 , 10 , 6 , 1024 } , { 0 , 12 , 6 , 4096 } ,
{ 32 , 0 , 4 , 0 } , { 0 , 0 , 4 , 1 } ,
{ 0 , 0 , 5 , 2 } , { 32 , 0 , 5 , 4 } ,
{ 0 , 0 , 5 , 5 } , { 32 , 0 , 5 , 7 } ,
{ 0 , 0 , 5 , 8 } , { 32 , 0 , 5 , 10 } ,
{ 0 , 0 , 5 , 11 } , { 0 , 0 , 6 , 13 } ,
{ 32 , 1 , 5 , 16 } , { 0 , 1 , 5 , 18 } ,
{ 32 , 1 , 5 , 22 } , { 0 , 2 , 5 , 24 } ,
{ 32 , 3 , 5 , 32 } , { 0 , 3 , 5 , 40 } ,
{ 0 , 6 , 4 , 64 } , { 16 , 6 , 4 , 64 } ,
{ 32 , 7 , 5 , 128 } , { 0 , 9 , 6 , 512 } ,
{ 0 , 11 , 6 , 2048 } , { 48 , 0 , 4 , 0 } ,
{ 16 , 0 , 4 , 1 } , { 32 , 0 , 5 , 2 } ,
{ 32 , 0 , 5 , 3 } , { 32 , 0 , 5 , 5 } ,
{ 32 , 0 , 5 , 6 } , { 32 , 0 , 5 , 8 } ,
{ 32 , 0 , 5 , 9 } , { 32 , 0 , 5 , 11 } ,
{ 32 , 0 , 5 , 12 } , { 0 , 0 , 6 , 15 } ,
{ 32 , 1 , 5 , 18 } , { 32 , 1 , 5 , 20 } ,
{ 32 , 2 , 5 , 24 } , { 32 , 2 , 5 , 28 } ,
{ 32 , 3 , 5 , 40 } , { 32 , 4 , 5 , 48 } ,
{ 0 , 16 , 6 , 65536 } , { 0 , 15 , 6 , 32768 } ,
{ 0 , 14 , 6 , 16384 } , { 0 , 13 , 6 , 8192 } ,
};   /* LL_defaultDTable */

/* Default FSE distribution table for Offset Codes */
static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
    /* nextState, nbAddBits, nbBits, baseVal */
{ 0 , 0 , 5 , 0 } , { 0 , 6 , 4 , 61 } ,
{ 0 , 9 , 5 , 509 } , { 0 , 15 , 5 , 32765 } ,
{ 0 , 21 , 5 , 2097149 } , { 0 , 3 , 5 , 5 } ,
{ 0 , 7 , 4 , 125 } , { 0 , 12 , 5 , 4093 } ,
{ 0 , 18 , 5 , 262141 } , { 0 , 23 , 5 , 8388605 } ,
{ 0 , 5 , 5 , 29 } , { 0 , 8 , 4 , 253 } ,
{ 0 , 14 , 5 , 16381 } , { 0 , 20 , 5 , 1048573 } ,
{ 0 , 2 , 5 , 1 } , { 16 , 7 , 4 , 125 } ,
{ 0 , 11 , 5 , 2045 } , { 0 , 17 , 5 , 131069 } ,
{ 0 , 22 , 5 , 4194301 } , { 0 , 4 , 5 , 13 } ,
{ 16 , 8 , 4 , 253 } , { 0 , 13 , 5 , 8189 } ,
{ 0 , 19 , 5 , 524285 } , { 0 , 1 , 5 , 1 } ,
{ 16 , 6 , 4 , 61 } , { 0 , 10 , 5 , 1021 } ,
{ 0 , 16 , 5 , 65533 } , { 0 , 28 , 5 , 268435453 } ,
{ 0 , 27 , 5 , 134217725 } , { 0 , 26 , 5 , 67108861 } ,
{ 0 , 25 , 5 , 33554429 } , { 0 , 24 , 5 , 16777213 } ,
};   /* OF_defaultDTable */

/* Default FSE distribution table for Match Lengths */
static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
    /* nextState, nbAddBits, nbBits, baseVal */
{ 0 , 0 , 6 , 3 } , { 0 , 0 , 4 , 4 } ,
{ 32 , 0 , 5 , 5 } , { 0 , 0 , 5 , 6 } ,
{ 0 , 0 , 5 , 8 } , { 0 , 0 , 5 , 9 } ,
{ 0 , 0 , 5 , 11 } , { 0 , 0 , 6 , 13 } ,
{ 0 , 0 , 6 , 16 } , { 0 , 0 , 6 , 19 } ,
{ 0 , 0 , 6 , 22 } , { 0 , 0 , 6 , 25 } ,
{ 0 , 0 , 6 , 28 } , { 0 , 0 , 6 , 31 } ,
{ 0 , 0 , 6 , 34 } , { 0 , 1 , 6 , 37 } ,
{ 0 , 1 , 6 , 41 } , { 0 , 2 , 6 , 47 } ,
{ 0 , 3 , 6 , 59 } , { 0 , 4 , 6 , 83 } ,
{ 0 , 7 , 6 , 131 } , { 0 , 9 , 6 , 515 } ,
{ 16 , 0 , 4 , 4 } , { 0 , 0 , 4 , 5 } ,
{ 32 , 0 , 5 , 6 } , { 0 , 0 , 5 , 7 } ,
{ 32 , 0 , 5 , 9 } , { 0 , 0 , 5 , 10 } ,
{ 0 , 0 , 6 , 12 } , { 0 , 0 , 6 , 15 } ,
{ 0 , 0 , 6 , 18 } , { 0 , 0 , 6 , 21 } ,
{ 0 , 0 , 6 , 24 } , { 0 , 0 , 6 , 27 } ,
{ 0 , 0 , 6 , 30 } , { 0 , 0 , 6 , 33 } ,
{ 0 , 1 , 6 , 35 } , { 0 , 1 , 6 , 39 } ,
{ 0 , 2 , 6 , 43 } , { 0 , 3 , 6 , 51 } ,
{ 0 , 4 , 6 , 67 } , { 0 , 5 , 6 , 99 } ,
{ 0 , 8 , 6 , 259 } , { 32 , 0 , 4 , 4 } ,
{ 48 , 0 , 4 , 4 } , { 16 , 0 , 4 , 5 } ,
{ 32 , 0 , 5 , 7 } , { 32 , 0 , 5 , 8 } ,
{ 32 , 0 , 5 , 10 } , { 32 , 0 , 5 , 11 } ,
{ 0 , 0 , 6 , 14 } , { 0 , 0 , 6 , 17 } ,
{ 0 , 0 , 6 , 20 } , { 0 , 0 , 6 , 23 } ,
{ 0 , 0 , 6 , 26 } , { 0 , 0 , 6 , 29 } ,
{ 0 , 0 , 6 , 32 } , { 0 , 16 , 6 , 65539 } ,
{ 0 , 15 , 6 , 32771 } , { 0 , 14 , 6 , 16387 } ,
{ 0 , 13 , 6 , 8195 } , { 0 , 12 , 6 , 4099 } ,
{ 0 , 11 , 6 , 2051 } , { 0 , 10 , 6 , 1027 } ,
};   /* ML_defaultDTable */


static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
{
    void* ptr = dt;
    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
    ZSTD_seqSymbol* const cell = dt + 1;

    DTableH->tableLog = 0;
    DTableH->fastMode = 0;

    cell->nbBits = 0;
    cell->nextState = 0;
    assert(nbAddBits < 255);
    cell->nbAdditionalBits = (BYTE)nbAddBits;
    cell->baseValue = baseValue;
}

/* ZSTD_buildFSETable() :
 * generate FSE decoding table for one symbol (ll, ml or off) */
static void
ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
            const short* normalizedCounter, unsigned maxSymbolValue,
            const U32* baseValue, const U32* nbAdditionalBits,
            unsigned tableLog)
{
    ZSTD_seqSymbol* const tableDecode = dt+1;
    U16 symbolNext[MaxSeq+1];

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    assert(maxSymbolValue <= MaxSeq);
    assert(tableLog <= MaxFSELog);

    /* Init, lay down lowprob symbols */
    {   ZSTD_seqSymbol_header DTableH;
        DTableH.tableLog = tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit = (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].baseValue = s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    symbolNext[s] = normalizedCounter[s];
        }   }   }
        memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    {   U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].baseValue = s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            U32 const symbol = tableDecode[u].baseValue;
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
            assert(nbAdditionalBits[symbol] < 255);
            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
            tableDecode[u].baseValue = baseValue[symbol];
    }   }
}
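
/* Worked example (informative only) : with tableLog==6 the table has 64 cells,
 * and FSE_TABLESTEP(64), defined in fse.h as (tableSize>>1)+(tableSize>>3)+3 at
 * the time of writing, equals 32+8+3 = 43.  Since 43 is odd and 64 is a power
 * of two, the step is coprime with the table size, so repeatedly adding 43
 * (mod 64) visits every cell exactly once.  This is why the spreading loop
 * above can assert(position == 0) at the end : after exactly tableSize steps
 * the walk necessarily returns to its starting cell. */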

/*! ZSTD_buildSeqTable() :
 * @return : nb bytes read from src,
 *           or an error code if it fails */
static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
                                 symbolEncodingType_e type, U32 max, U32 maxLog,
                                 const void* src, size_t srcSize,
                                 const U32* baseValue, const U32* nbAdditionalBits,
                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
                                 int ddictIsCold, int nbSeq)
{
    switch(type)
    {
    case set_rle :
        if (!srcSize) return ERROR(srcSize_wrong);
        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
        {   U32 const symbol = *(const BYTE*)src;
            U32 const baseline = baseValue[symbol];
            U32 const nbBits = nbAdditionalBits[symbol];
            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
        }
        *DTablePtr = DTableSpace;
        return 1;
    case set_basic :
        *DTablePtr = defaultTable;
        return 0;
    case set_repeat:
        if (!flagRepeatTable) return ERROR(corruption_detected);
        /* prefetch FSE table if used */
        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
            const void* const pStart = *DTablePtr;
            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
            PREFETCH_AREA(pStart, pSize);
        }
        return 0;
    case set_compressed :
        {   U32 tableLog;
            S16 norm[MaxSeq+1];
            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
            if (FSE_isError(headerSize)) return ERROR(corruption_detected);
            if (tableLog > maxLog) return ERROR(corruption_detected);
            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
            *DTablePtr = DTableSpace;
            return headerSize;
        }
    default :   /* impossible */
        assert(0);
        return ERROR(GENERIC);
    }
}

static const U32 LL_base [ MaxLL + 1 ] = {
0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ,
8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ,
16 , 18 , 20 , 22 , 24 , 28 , 32 , 40 ,
48 , 64 , 0x80 , 0x100 , 0x200 , 0x400 , 0x800 , 0x1000 ,
0x2000 , 0x4000 , 0x8000 , 0x10000 } ;
static const U32 OF_base [ MaxOff + 1 ] = {
0 , 1 , 1 , 5 , 0xD , 0x1D , 0x3D , 0x7D ,
0xFD , 0x1FD , 0x3FD , 0x7FD , 0xFFD , 0x1FFD , 0x3FFD , 0x7FFD ,
0xFFFD , 0x1FFFD , 0x3FFFD , 0x7FFFD , 0xFFFFD , 0x1FFFFD , 0x3FFFFD , 0x7FFFFD ,
0xFFFFFD , 0x1FFFFFD , 0x3FFFFFD , 0x7FFFFFD , 0xFFFFFFD , 0x1FFFFFFD , 0x3FFFFFFD , 0x7FFFFFFD } ;
static const U32 OF_bits [ MaxOff + 1 ] = {
0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ,
8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ,
16 , 17 , 18 , 19 , 20 , 21 , 22 , 23 ,
24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 } ;
static const U32 ML_base [ MaxML + 1 ] = {
3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ,
11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 ,
19 , 20 , 21 , 22 , 23 , 24 , 25 , 26 ,
27 , 28 , 29 , 30 , 31 , 32 , 33 , 34 ,
35 , 37 , 39 , 41 , 43 , 47 , 51 , 59 ,
67 , 83 , 99 , 0x83 , 0x103 , 0x203 , 0x403 , 0x803 ,
0x1003 , 0x2003 , 0x4003 , 0x8003 , 0x10003 } ;

size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize)
{
    const BYTE* const istart = (const BYTE* const)src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip = istart;
    int nbSeq;
    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");

    /* check */
    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);

    /* SeqHead */
    nbSeq = *ip++;
    if (!nbSeq) { *nbSeqPtr=0; return 1; }
    if (nbSeq > 0x7F) {
        if (nbSeq == 0xFF) {
            if (ip+2 > iend) return ERROR(srcSize_wrong);
            nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
        } else {
            if (ip >= iend) return ERROR(srcSize_wrong);
            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
        }
    }
    *nbSeqPtr = nbSeq;

    /* FSE table descriptors */
    if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
        ip++;

        /* Build DTables */
        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
                                                      LLtype, MaxLL, LLFSELog,
                                                      ip, iend-ip,
                                                      LL_base, LL_bits,
                                                      LL_defaultDTable, dctx->fseEntropy,
                                                      dctx->ddictIsCold, nbSeq);
            if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
            ip += llhSize;
        }

        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
                                                      OFtype, MaxOff, OffFSELog,
                                                      ip, iend-ip,
                                                      OF_base, OF_bits,
                                                      OF_defaultDTable, dctx->fseEntropy,
                                                      dctx->ddictIsCold, nbSeq);
            if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
            ip += ofhSize;
        }

        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
                                                      MLtype, MaxML, MLFSELog,
                                                      ip, iend-ip,
                                                      ML_base, ML_bits,
                                                      ML_defaultDTable, dctx->fseEntropy,
                                                      dctx->ddictIsCold, nbSeq);
            if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
            ip += mlhSize;
        }
    }

    /* prefetch dictionary content */
    if (dctx->ddictIsCold) {
        size_t const dictSize = (const char*)dctx->prefixStart - (const char*)dctx->virtualStart;
        size_t const pSize = MIN(dictSize, (size_t)(64*nbSeq));
        const void* const pStart = (const char*)dctx->dictEnd - pSize;
        DEBUGLOG(2, "dictSize: %zu ; prefetchSize: %zu", dictSize, pSize);
        PREFETCH_AREA(pStart, pSize);
        dctx->ddictIsCold = 0;
    }

    return ip-istart;
}


typedef struct {
    size_t litLength;
    size_t matchLength;
    size_t offset;
    const BYTE* match;
} seq_t;

typedef struct {
    size_t state;
    const ZSTD_seqSymbol* table;
} ZSTD_fseState;

typedef struct {
    BIT_DStream_t DStream;
    ZSTD_fseState stateLL;
    ZSTD_fseState stateOffb;
    ZSTD_fseState stateML;
    size_t prevOffset[ZSTD_REP_NUM];
    const BYTE* prefixStart;
    const BYTE* dictEnd;
    size_t pos;
} seqState_t;

FORCE_NOINLINE
size_t ZSTD_execSequenceLast7(BYTE* op,
                              BYTE* const oend, seq_t sequence,
                              const BYTE** litPtr, const BYTE* const litLimit,
                              const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
    BYTE* const oLitEnd = op + sequence.litLength;
    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
    const BYTE* match = oLitEnd - sequence.offset;

    /* check */
    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
    if (oLitEnd <= oend_w) return ERROR(GENERIC);   /* Precondition */

    /* copy literals */
    if (op < oend_w) {
        ZSTD_wildcopy(op, *litPtr, oend_w - op);
        *litPtr += oend_w - op;
        op = oend_w;
    }
    while (op < oLitEnd) *op++ = *(*litPtr)++;

    /* copy Match */
    if (sequence.offset > (size_t)(oLitEnd - base)) {
        /* offset beyond prefix */
        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
        match = dictEnd - (base - match);
        if (match + sequence.matchLength <= dictEnd) {
            memmove(oLitEnd, match, sequence.matchLength);
            return sequenceLength;
        }
        /* span extDict & currentPrefixSegment */
        {   size_t const length1 = dictEnd - match;
            memmove(oLitEnd, match, length1);
            op = oLitEnd + length1;
            sequence.matchLength -= length1;
            match = base;
    }   }
    while (op < oMatchEnd) *op++ = *match++;
    return sequenceLength;
}

HINT_INLINE
size_t ZSTD_execSequence(BYTE* op,
                         BYTE* const oend, seq_t sequence,
                         const BYTE** litPtr, const BYTE* const litLimit,
                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
    BYTE* const oLitEnd = op + sequence.litLength;
    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
    const BYTE* match = oLitEnd - sequence.offset;

    /* check */
    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
    if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);

    /* copy Literals */
    ZSTD_copy8(op, *litPtr);
    if (sequence.litLength > 8)
        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
    op = oLitEnd;
    *litPtr = iLitEnd;   /* update for next sequence */

    /* copy Match */
    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
        /* offset beyond prefix -> go into extDict */
        if (sequence.offset > (size_t)(oLitEnd - virtualStart))
            return ERROR(corruption_detected);
        match = dictEnd + (match - prefixStart);
        if (match + sequence.matchLength <= dictEnd) {
            memmove(oLitEnd, match, sequence.matchLength);
            return sequenceLength;
        }
        /* span extDict & currentPrefixSegment */
        {   size_t const length1 = dictEnd - match;
            memmove(oLitEnd, match, length1);
            op = oLitEnd + length1;
            sequence.matchLength -= length1;
            match = prefixStart;
            if (op > oend_w || sequence.matchLength < MINMATCH) {
                U32 i;
                for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
                return sequenceLength;
            }
    }   }
    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */

    /* match within prefix */
    if (sequence.offset < 8) {
        /* close range match, overlap */
        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
        static const int dec64table[] = { 8, 8, 8, 7, 8, 9, 10, 11 };   /* subtracted */
        int const sub2 = dec64table[sequence.offset];
        op[0] = match[0];
        op[1] = match[1];
        op[2] = match[2];
        op[3] = match[3];
        match += dec32table[sequence.offset];
        ZSTD_copy4(op+4, match);
        match -= sub2;
    } else {
        ZSTD_copy8(op, match);
    }
    op += 8; match += 8;

    if (oMatchEnd > oend-(16-MINMATCH)) {
        if (op < oend_w) {
            ZSTD_wildcopy(op, match, oend_w - op);
            match += oend_w - op;
            op = oend_w;
        }
        while (op < oMatchEnd) *op++ = *match++;
    } else {
        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
    }
    return sequenceLength;
}

HINT_INLINE
size_t ZSTD_execSequenceLong(BYTE* op,
                             BYTE* const oend, seq_t sequence,
                             const BYTE** litPtr, const BYTE* const litLimit,
                             const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
{
    BYTE* const oLitEnd = op + sequence.litLength;
    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
    const BYTE* match = sequence.match;

    /* check */
    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
    if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);

    /* copy Literals */
    ZSTD_copy8(op, *litPtr);   /* note : op <= oLitEnd <= oend_w == oend - 8 */
    if (sequence.litLength > 8)
        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
    op = oLitEnd;
    *litPtr = iLitEnd;   /* update for next sequence */

    /* copy Match */
    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
        /* offset beyond prefix */
        if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
        if (match + sequence.matchLength <= dictEnd) {
            memmove(oLitEnd, match, sequence.matchLength);
            return sequenceLength;
        }
        /* span extDict & currentPrefixSegment */
        {   size_t const length1 = dictEnd - match;
            memmove(oLitEnd, match, length1);
            op = oLitEnd + length1;
            sequence.matchLength -= length1;
            match = prefixStart;
            if (op > oend_w || sequence.matchLength < MINMATCH) {
                U32 i;
                for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
                return sequenceLength;
            }
    }   }
    assert(op <= oend_w);
    assert(sequence.matchLength >= MINMATCH);

    /* match within prefix */
    if (sequence.offset < 8) {
        /* close range match, overlap */
        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
        static const int dec64table[] = { 8, 8, 8, 7, 8, 9, 10, 11 };   /* subtracted */
        int const sub2 = dec64table[sequence.offset];
        op[0] = match[0];
        op[1] = match[1];
        op[2] = match[2];
        op[3] = match[3];
        match += dec32table[sequence.offset];
        ZSTD_copy4(op+4, match);
        match -= sub2;
    } else {
        ZSTD_copy8(op, match);
    }
    op += 8; match += 8;

    if (oMatchEnd > oend-(16-MINMATCH)) {
        if (op < oend_w) {
            ZSTD_wildcopy(op, match, oend_w - op);
            match += oend_w - op;
            op = oend_w;
        }
        while (op < oMatchEnd) *op++ = *match++;
    } else {
        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
    }
    return sequenceLength;
}

static void
ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
{
    const void* ptr = dt;
    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
                (U32)DStatePtr->state, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

FORCE_INLINE_TEMPLATE void
ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
{
    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.nextState + lowBits;
}

/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
 * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
 * bits before reloading. This value is the maximum number of bytes we read
 * after reloading when we are decoding long offsets.
 */
#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
        : 0)

typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
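
/* Worked example (illustrative only ; assumes the usual values
 * ZSTD_WINDOWLOG_MAX_32 == 30 and STREAM_ACCUMULATOR_MIN_32 == 25
 * from the internal headers) :
 *   LONG_OFFSETS_MAX_EXTRA_BITS_32 = 30 - 25 = 5
 * which matches the ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5)
 * used in the sequence decoders below. */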

FORCE_INLINE_TEMPLATE seq_t
ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
{
    seq_t seq;
    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
    U32 const totalBits = llBits+mlBits+ofBits;
    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;

    /* sequence */
    {   size_t offset;
        if (!ofBits)
            offset = 0;
        else {
            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
            assert(ofBits <= MaxOff);
            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
                BIT_reloadDStream(&seqState->DStream);
                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
            } else {
                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
            }
        }

        if (ofBits <= 1) {
            offset += (llBase==0);
            if (offset) {
                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
                seqState->prevOffset[1] = seqState->prevOffset[0];
                seqState->prevOffset[0] = offset = temp;
            } else {   /* offset == 0 */
                offset = seqState->prevOffset[0];
            }
        } else {
            seqState->prevOffset[2] = seqState->prevOffset[1];
            seqState->prevOffset[1] = seqState->prevOffset[0];
            seqState->prevOffset[0] = offset;
        }
        seq.offset = offset;
    }

    seq.matchLength = mlBase
                    + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0);   /* <= 16 bits */
    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
        BIT_reloadDStream(&seqState->DStream);
    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
        BIT_reloadDStream(&seqState->DStream);
    /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);

    seq.litLength = llBase
                  + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0);   /* <= 16 bits */
    if (MEM_32bits())
        BIT_reloadDStream(&seqState->DStream);

    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);

    /* ANS state update */
    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);        /* <= 18 bits */
    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */

    return seq;
}
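
/* Recap of the repeat-offset handling above (descriptive only) :
 * when ofBits <= 1, the decoded value is an index into the repeat-offset history
 * rather than a literal offset. The index is shifted by one when litLength == 0
 * (offset += (llBase==0)), so that after the shift :
 *   index 0 -> reuse prevOffset[0] unchanged,
 *   index 1 -> prevOffset[1], index 2 -> prevOffset[2], index 3 -> prevOffset[0] - 1,
 * and the history is then rotated with the selected value moved to the front. */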

FORCE_INLINE_TEMPLATE size_t
ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
                              void* dst, size_t maxDstSize,
                        const void* seqStart, size_t seqSize, int nbSeq,
                        const ZSTD_longOffset_e isLongOffset)
{
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* const oend = ostart + maxDstSize;
    BYTE* op = ostart;
    const BYTE* litPtr = dctx->litPtr;
    const BYTE* const litEnd = litPtr + dctx->litSize;
    const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
    const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
    const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
    DEBUGLOG(5, "ZSTD_decompressSequences_body");

    /* Regen sequences */
    if (nbSeq) {
        seqState_t seqState;
        dctx->fseEntropy = 1;
        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);

        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
            nbSeq--;
            {   seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
                DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
                op += oneSeqSize;
        }   }

        /* check if reached exact end */
        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
        if (nbSeq) return ERROR(corruption_detected);
        /* save reps for next block */
        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
    }

    /* last literal segment */
    {   size_t const lastLLSize = litEnd - litPtr;
        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
        memcpy(op, litPtr, lastLLSize);
        op += lastLLSize;
    }

    return op-ostart;
}

static size_t
ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
                                 void* dst, size_t maxDstSize,
                           const void* seqStart, size_t seqSize, int nbSeq,
                           const ZSTD_longOffset_e isLongOffset)
{
    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}

FORCE_INLINE_TEMPLATE seq_t
ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
{
    seq_t seq;
    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
    U32 const totalBits = llBits+mlBits+ofBits;
    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;

    /* sequence */
    {   size_t offset;
        if (!ofBits)
            offset = 0;
        else {
            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
            assert(ofBits <= MaxOff);
            if (MEM_32bits() && longOffsets) {
                U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
                if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
            } else {
                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits);   /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
            }
        }

        if (ofBits <= 1) {
            offset += (llBase==0);
            if (offset) {
                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
                seqState->prevOffset[1] = seqState->prevOffset[0];
                seqState->prevOffset[0] = offset = temp;
            } else {
                offset = seqState->prevOffset[0];
            }
        } else {
            seqState->prevOffset[2] = seqState->prevOffset[1];
            seqState->prevOffset[1] = seqState->prevOffset[0];
            seqState->prevOffset[0] = offset;
        }
        seq.offset = offset;
    }

    seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0);   /* <= 16 bits */
    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
        BIT_reloadDStream(&seqState->DStream);
    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
        BIT_reloadDStream(&seqState->DStream);
    /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);

    seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0);   /* <= 16 bits */
    if (MEM_32bits())
        BIT_reloadDStream(&seqState->DStream);

    {   size_t const pos = seqState->pos + seq.litLength;
        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
        seq.match = matchBase + pos - seq.offset;   /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
                                                     * No consequence though : no memory access will occur, overly large offset will be detected in ZSTD_execSequenceLong() */
        seqState->pos = pos + seq.matchLength;
    }

    /* ANS state update */
    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);        /* <= 18 bits */
    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */

    return seq;
}

FORCE_INLINE_TEMPLATE size_t
ZSTD_decompressSequencesLong_body(
                               ZSTD_DCtx* dctx,
                               void* dst, size_t maxDstSize,
                         const void* seqStart, size_t seqSize, int nbSeq,
                         const ZSTD_longOffset_e isLongOffset)
{
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* const oend = ostart + maxDstSize;
    BYTE* op = ostart;
    const BYTE* litPtr = dctx->litPtr;
    const BYTE* const litEnd = litPtr + dctx->litSize;
    const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
    const BYTE* const dictStart = (const BYTE*)(dctx->virtualStart);
    const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);

    /* Regen sequences */
    if (nbSeq) {
#define STORED_SEQS 4
#define STOSEQ_MASK (STORED_SEQS-1)
#define ADVANCED_SEQS 4
        seq_t sequences[STORED_SEQS];
        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
        seqState_t seqState;
        int seqNb;
        dctx->fseEntropy = 1;
        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
        seqState.prefixStart = prefixStart;
        seqState.pos = (size_t)(op-prefixStart);
        seqState.dictEnd = dictEnd;
        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);

        /* prepare in advance */
        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
            sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
        }
        if (seqNb<seqAdvance) return ERROR(corruption_detected);

        /* decode and decompress */
        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
            seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
            PREFETCH(sequence.match);   /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
            sequences[seqNb & STOSEQ_MASK] = sequence;
            op += oneSeqSize;
        }
        if (seqNb<nbSeq) return ERROR(corruption_detected);

        /* finish queue */
        seqNb -= seqAdvance;
        for ( ; seqNb<nbSeq ; seqNb++) {
            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
            op += oneSeqSize;
        }

        /* save reps for next block */
        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
#undef STORED_SEQS
#undef STOSEQ_MASK
#undef ADVANCED_SEQS
    }

    /* last literal segment */
    {   size_t const lastLLSize = litEnd - litPtr;
        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
        memcpy(op, litPtr, lastLLSize);
        op += lastLLSize;
    }

    return op-ostart;
}
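
/* Design note (descriptive only) : ZSTD_decompressSequencesLong_body() keeps a small
 * ring of STORED_SEQS (4) decoded sequences. Each iteration decodes one sequence ahead,
 * issues PREFETCH() on its match address, and executes the sequence that was decoded
 * ADVANCED_SEQS iterations earlier, so the match data is likely to be in cache by the
 * time it is copied. The "finish queue" loop then drains the remaining ring entries. */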
static size_t
ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
                                     void* dst, size_t maxDstSize,
                               const void* seqStart, size_t seqSize, int nbSeq,
                               const ZSTD_longOffset_e isLongOffset)
{
    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}


#if DYNAMIC_BMI2

static TARGET_ATTRIBUTE("bmi2") size_t
ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
                              void* dst, size_t maxDstSize,
                        const void* seqStart, size_t seqSize, int nbSeq,
                        const ZSTD_longOffset_e isLongOffset)
{
    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}

static TARGET_ATTRIBUTE("bmi2") size_t
ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
                                  void* dst, size_t maxDstSize,
                            const void* seqStart, size_t seqSize, int nbSeq,
                            const ZSTD_longOffset_e isLongOffset)
{
    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}

#endif

typedef size_t (*ZSTD_decompressSequences_t)(
                            ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
                            const void* seqStart, size_t seqSize, int nbSeq,
                            const ZSTD_longOffset_e isLongOffset);

static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
                                const void* seqStart, size_t seqSize, int nbSeq,
                                const ZSTD_longOffset_e isLongOffset)
{
    DEBUGLOG(5, "ZSTD_decompressSequences");
#if DYNAMIC_BMI2
    if (dctx->bmi2) {
        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
    }
#endif
    return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}

static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
                                void* dst, size_t maxDstSize,
                                const void* seqStart, size_t seqSize, int nbSeq,
                                const ZSTD_longOffset_e isLongOffset)
{
    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
#if DYNAMIC_BMI2
    if (dctx->bmi2) {
        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
    }
#endif
    return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}

/* ZSTD_getLongOffsetsShare() :
 * condition : offTable must be valid
 * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
 *           compared to maximum possible of (1<<OffFSELog) */
static unsigned
ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
{
    const void* ptr = offTable;
    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
    const ZSTD_seqSymbol* table = offTable + 1;
    U32 const max = 1 << tableLog;
    U32 u, total = 0;
    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);

    assert(max <= (1 << OffFSELog));   /* max not too large */
    for (u=0; u<max; u++) {
        if (table[u].nbAdditionalBits > 22) total += 1;
    }

    assert(tableLog <= OffFSELog);
    total <<= (OffFSELog - tableLog);   /* scale to OffFSELog */

    return total;
}
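
/* Worked example (illustrative only) : the returned share is scaled to OffFSELog,
 * so with OffFSELog == 8 the maximum possible value is 1<<8 == 256. The thresholds
 * applied by ZSTD_decompressBlock_internal() below therefore translate as :
 *   minShare ==  7  ->  7/256  ~= 2.73% of offset states being "long"
 *   minShare == 20  -> 20/256  ~= 7.81%
 */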

static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
                            void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize, const int frame)
{   /* blockType == blockCompressed */
    const BYTE* ip = (const BYTE*)src;
    /* isLongOffset must be true if there are long offsets.
     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
     * We don't expect that to be the case in 64-bit mode.
     * In block mode, window size is not known, so we have to be conservative.
     * (note: but it could be evaluated from current-lowLimit)
     */
    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);

    if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);

    /* Decode literals section */
    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
        if (ZSTD_isError(litCSize)) return litCSize;
        ip += litCSize;
        srcSize -= litCSize;
    }

    /* Build Decoding Tables */
    {   int nbSeq;
        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
        if (ZSTD_isError(seqHSize)) return seqHSize;
        ip += seqHSize;
        srcSize -= seqHSize;

        if ( (!frame || dctx->fParams.windowSize > (1<<24))
          && (nbSeq > 0) ) {   /* could probably use a larger nbSeq limit */
            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
            U32 const minShare = MEM_64bits() ? 7 : 20;   /* heuristic values, correspond to 2.73% and 7.81% */
            if (shareLongOffsets >= minShare)
                return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
        }

        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
    }
}

static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
{
    if (dst != dctx->previousDstEnd) {   /* not contiguous */
        dctx->dictEnd = dctx->previousDstEnd;
        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
        dctx->prefixStart = dst;
        dctx->previousDstEnd = dst;
    }
}

size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
                            void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize)
{
    size_t dSize;
    ZSTD_checkContinuity(dctx, dst);
    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
    dctx->previousDstEnd = (char*)dst + dSize;
    return dSize;
}


/** ZSTD_insertBlock() :
    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
{
    ZSTD_checkContinuity(dctx, blockStart);
    dctx->previousDstEnd = (const char*)blockStart + blockSize;
    return blockSize;
}
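
/* Usage sketch (illustrative only, not part of the library) : decoding independent
 * blocks with the block-level API. Buffer names and sizes below are hypothetical,
 * and error handling is reduced to a minimum.
 *
 *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *   if (!ZSTD_isError(ZSTD_decompressBegin(dctx))) {   // no dictionary in this sketch
 *       size_t const rSize = ZSTD_decompressBlock(dctx, dstBuf, dstCapacity,
 *                                                 cBlockBuf, cBlockSize);
 *       if (ZSTD_isError(rSize)) { / * handle error * / }
 *       // for a block transmitted uncompressed, register it into history instead :
 *       // ZSTD_insertBlock(dctx, rawBlockBuf, rawBlockSize);
 *   }
 *   ZSTD_freeDCtx(dctx);
 */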

static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
{
    if (length > dstCapacity) return ERROR(dstSize_tooSmall);
    memset(dst, byte, length);
    return length;
}

/** ZSTD_findFrameCompressedSize() :
 *  compatible with legacy mode
 *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
 *  `srcSize` must be at least as large as the frame contained
 *  @return : the compressed size of the frame starting at `src` */
size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
{
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
    if (ZSTD_isLegacy(src, srcSize))
        return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
#endif
    if ( (srcSize >= ZSTD_skippableHeaderSize)
      && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
        return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE);
    } else {
        const BYTE* ip = (const BYTE*)src;
        const BYTE* const ipstart = ip;
        size_t remainingSize = srcSize;
        ZSTD_frameHeader zfh;

        /* Extract Frame Header */
        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
            if (ZSTD_isError(ret)) return ret;
            if (ret > 0) return ERROR(srcSize_wrong);
        }

        ip += zfh.headerSize;
        remainingSize -= zfh.headerSize;

        /* Loop on each block */
        while (1) {
            blockProperties_t blockProperties;
            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
            if (ZSTD_isError(cBlockSize)) return cBlockSize;

            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
                return ERROR(srcSize_wrong);

            ip += ZSTD_blockHeaderSize + cBlockSize;
            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;

            if (blockProperties.lastBlock) break;
        }

        if (zfh.checksumFlag) {   /* Final frame content checksum */
            if (remainingSize < 4) return ERROR(srcSize_wrong);
            ip += 4;
        }

        return ip - ipstart;
    }
}
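
/* Usage sketch (illustrative only) : walking a buffer that contains several
 * concatenated frames, using ZSTD_findFrameCompressedSize() to locate each frame
 * boundary. `buf` and `bufSize` are hypothetical.
 *
 *   const char* p = buf;
 *   size_t remaining = bufSize;
 *   while (remaining >= ZSTD_frameHeaderSize_prefix) {
 *       size_t const frameSize = ZSTD_findFrameCompressedSize(p, remaining);
 *       if (ZSTD_isError(frameSize)) break;   // malformed or truncated input
 *       // the frame occupies [p, p+frameSize) : decompress it, or skip it
 *       p += frameSize;
 *       remaining -= frameSize;
 *   }
 */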

/*! ZSTD_decompressFrame() :
 * @dctx must be properly initialized */
static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
                                   void* dst, size_t dstCapacity,
                             const void** srcPtr, size_t* srcSizePtr)
{
    const BYTE* ip = (const BYTE*)(*srcPtr);
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t remainingSize = *srcSizePtr;

    /* check */
    if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize)
        return ERROR(srcSize_wrong);

    /* Frame Header */
    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
        if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize)
            return ERROR(srcSize_wrong);
        CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
    }

    /* Loop on each block */
    while (1) {
        size_t decodedSize;
        blockProperties_t blockProperties;
        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
        if (ZSTD_isError(cBlockSize)) return cBlockSize;

        ip += ZSTD_blockHeaderSize;
        remainingSize -= ZSTD_blockHeaderSize;
        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

        switch(blockProperties.blockType)
        {
        case bt_compressed:
            decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
            break;
        case bt_raw :
            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
            break;
        case bt_rle :
            decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
            break;
        case bt_reserved :
        default:
            return ERROR(corruption_detected);
        }

        if (ZSTD_isError(decodedSize)) return decodedSize;
        if (dctx->fParams.checksumFlag)
            XXH64_update(&dctx->xxhState, op, decodedSize);
        op += decodedSize;
        ip += cBlockSize;
        remainingSize -= cBlockSize;
        if (blockProperties.lastBlock) break;
    }

    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
        if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
            return ERROR(corruption_detected);
    }   }
    if (dctx->fParams.checksumFlag) {   /* Frame content checksum verification */
        U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
        U32 checkRead;
        if (remainingSize < 4) return ERROR(checksum_wrong);
        checkRead = MEM_readLE32(ip);
        if (checkRead != checkCalc) return ERROR(checksum_wrong);
        ip += 4;
        remainingSize -= 4;
    }

    /* Allow caller to get size read */
    *srcPtr = ip;
    *srcSizePtr = remainingSize;
    return op-ostart;
}
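
/* Frame layout handled by ZSTD_decompressFrame(), for reference (descriptive only) :
 *   [ frame header ][ block header + block ] ... [ block header + last block ][ optional 4-byte checksum ]
 * Each block header carries the block type (compressed / raw / RLE), its size and a
 * lastBlock flag ; the trailing checksum field is present only when fParams.checksumFlag is set. */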

static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
                                        void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                  const void* dict, size_t dictSize,
                                  const ZSTD_DDict* ddict)
{
    void* const dststart = dst;
    int moreThan1Frame = 0;

    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
    assert(dict==NULL || ddict==NULL);   /* either dict or ddict set, not both */

    if (ddict) {
        dict = ZSTD_DDictDictContent(ddict);
        dictSize = ZSTD_DDictDictSize(ddict);
    }

    while (srcSize >= ZSTD_frameHeaderSize_prefix) {

#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
        if (ZSTD_isLegacy(src, srcSize)) {
            size_t decodedSize;
            size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
            if (ZSTD_isError(frameSize)) return frameSize;
            /* legacy support is not compatible with static dctx */
            if (dctx->staticSize) return ERROR(memory_allocation);

            decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);

            dst = (BYTE*)dst + decodedSize;
            dstCapacity -= decodedSize;

            src = (const BYTE*)src + frameSize;
            srcSize -= frameSize;

            continue;
        }
#endif
        {   U32 const magicNumber = MEM_readLE32(src);
            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
                        (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
            if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
                size_t skippableSize;
                if (srcSize < ZSTD_skippableHeaderSize)
                    return ERROR(srcSize_wrong);
                skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE)
                              + ZSTD_skippableHeaderSize;
                if (srcSize < skippableSize) return ERROR(srcSize_wrong);

                src = (const BYTE*)src + skippableSize;
                srcSize -= skippableSize;
                continue;
        }   }

        if (ddict) {
            /* we were called from ZSTD_decompress_usingDDict */
            CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
        } else {
            /* this will initialize correctly with no dict if dict == NULL, so
             * use this in all cases but ddict */
            CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
        }
        ZSTD_checkContinuity(dctx, dst);

        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
                                                    &src, &srcSize);
            if ( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
              && (moreThan1Frame==1) ) {
                /* at least one frame successfully completed,
                 * but following bytes are garbage :
                 * it's more likely to be a srcSize error,
                 * specifying more bytes than compressed size of frame(s).
                 * This error message replaces ERROR(prefix_unknown),
                 * which would be confusing, as the first header is actually correct.
                 * Note that one could be unlucky, it might be a corruption error instead,
                 * happening right at the place where we expect zstd magic bytes.
                 * But this is _much_ less likely than a srcSize field error. */
                return ERROR(srcSize_wrong);
            }
            if (ZSTD_isError(res)) return res;
            /* no need to bound check, ZSTD_decompressFrame already has */
            dst = (BYTE*)dst + res;
            dstCapacity -= res;
        }
        moreThan1Frame = 1;
    }   /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */

    if (srcSize) return ERROR(srcSize_wrong);   /* input not entirely consumed */

    return (BYTE*)dst - (BYTE*)dststart;
}

size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
                                 void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize,
                           const void* dict, size_t dictSize)
{
    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
}


size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
}


size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
    size_t regenSize;
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx==NULL) return ERROR(memory_allocation);
    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeDCtx(dctx);
    return regenSize;
#else   /* stack mode */
    ZSTD_DCtx dctx;
    ZSTD_initDCtx_internal(&dctx);
    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
#endif
}
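
/* Usage sketch (illustrative only) : one-shot decompression with an explicit context,
 * sizing the destination from the frame header. Assumes the frame declares its content
 * size ; `cSrc` and `cSrcSize` are hypothetical.
 *
 *   unsigned long long const rSize = ZSTD_getFrameContentSize(cSrc, cSrcSize);
 *   if (rSize != ZSTD_CONTENTSIZE_UNKNOWN && rSize != ZSTD_CONTENTSIZE_ERROR) {
 *       void* const dstBuf = malloc((size_t)rSize);
 *       ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *       size_t const dSize = ZSTD_decompressDCtx(dctx, dstBuf, (size_t)rSize, cSrc, cSrcSize);
 *       if (ZSTD_isError(dSize)) { / * handle error * / }
 *       ZSTD_freeDCtx(dctx);
 *       free(dstBuf);
 *   }
 */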

/*-**************************************
*   Advanced Streaming Decompression API
*   Bufferless and synchronous
****************************************/
size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }

ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
    switch(dctx->stage)
    {
    default:   /* should not happen */
        assert(0);
    case ZSTDds_getFrameHeaderSize:
    case ZSTDds_decodeFrameHeader:
        return ZSTDnit_frameHeader;
    case ZSTDds_decodeBlockHeader:
        return ZSTDnit_blockHeader;
    case ZSTDds_decompressBlock:
        return ZSTDnit_block;
    case ZSTDds_decompressLastBlock:
        return ZSTDnit_lastBlock;
    case ZSTDds_checkChecksum:
        return ZSTDnit_checksum;
    case ZSTDds_decodeSkippableHeader:
    case ZSTDds_skipFrame:
        return ZSTDnit_skippableFrame;
    }
}
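
/* Usage sketch (illustrative only) : the bufferless streaming protocol driven by
 * ZSTD_decompressContinue() below. The caller always feeds exactly the number of
 * bytes announced by ZSTD_nextSrcSizeToDecompress(). `readInput()` is a hypothetical
 * helper that fills `inBuf` with the requested number of bytes.
 *
 *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *   ZSTD_decompressBegin(dctx);
 *   {   size_t toRead;
 *       while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
 *           readInput(inBuf, toRead);
 *           {   size_t const genSize = ZSTD_decompressContinue(dctx, outPtr, outCapacity, inBuf, toRead);
 *               if (ZSTD_isError(genSize)) break;
 *               outPtr += genSize; outCapacity -= genSize;
 *   }   }   }
 *   ZSTD_freeDCtx(dctx);
 */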

static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }

/** ZSTD_decompressContinue() :
 *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
 *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity)
 *            or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
    /* Sanity check */
    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);   /* not allowed */
    if (dstCapacity) ZSTD_checkContinuity(dctx, dst);

    switch (dctx->stage)
    {
    case ZSTDds_getFrameHeaderSize :
        assert(src != NULL);
        if (dctx->format == ZSTD_f_zstd1) {   /* allows header */
            assert(srcSize >= ZSTD_FRAMEIDSIZE);   /* to read skippable magic number */
            if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {   /* skippable frame */
                memcpy(dctx->headerBuffer, src, srcSize);
                dctx->expected = ZSTD_skippableHeaderSize - srcSize;   /* remaining to load to get full skippable frame header */
                dctx->stage = ZSTDds_decodeSkippableHeader;
                return 0;
        }   }
        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
        memcpy(dctx->headerBuffer, src, srcSize);
        dctx->expected = dctx->headerSize - srcSize;
        dctx->stage = ZSTDds_decodeFrameHeader;
        return 0;

    case ZSTDds_decodeFrameHeader:
        assert(src != NULL);
        memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
        CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
        dctx->expected = ZSTD_blockHeaderSize;
        dctx->stage = ZSTDds_decodeBlockHeader;
        return 0;

    case ZSTDds_decodeBlockHeader:
        {   blockProperties_t bp;
            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
            if (ZSTD_isError(cBlockSize)) return cBlockSize;
            dctx->expected = cBlockSize;
            dctx->bType = bp.blockType;
            dctx->rleSize = bp.origSize;
            if (cBlockSize) {
                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
                return 0;
            }
            /* empty block */
            if (bp.lastBlock) {
                if (dctx->fParams.checksumFlag) {
                    dctx->expected = 4;
                    dctx->stage = ZSTDds_checkChecksum;
                } else {
                    dctx->expected = 0;   /* end of frame */
                    dctx->stage = ZSTDds_getFrameHeaderSize;
                }
            } else {
                dctx->expected = ZSTD_blockHeaderSize;   /* jump to next header */
                dctx->stage = ZSTDds_decodeBlockHeader;
            }
            return 0;
        }

    case ZSTDds_decompressLastBlock:
    case ZSTDds_decompressBlock:
        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
        {   size_t rSize;
            switch(dctx->bType)
            {
            case bt_compressed:
                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
                break;
            case bt_raw :
                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
                break;
            case bt_rle :
                rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize);
                break;
            case bt_reserved :   /* should never happen */
            default:
                return ERROR(corruption_detected);
            }
            if (ZSTD_isError(rSize)) return rSize;
            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
            dctx->decodedSize += rSize;
            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);

            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
                if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
                    if (dctx->decodedSize != dctx->fParams.frameContentSize) {
                        return ERROR(corruption_detected);
                }   }
                if (dctx->fParams.checksumFlag) {   /* another round for frame checksum */
                    dctx->expected = 4;
                    dctx->stage = ZSTDds_checkChecksum;
                } else {
                    dctx->expected = 0;   /* ends here */
                    dctx->stage = ZSTDds_getFrameHeaderSize;
                }
            } else {
                dctx->stage = ZSTDds_decodeBlockHeader;
                dctx->expected = ZSTD_blockHeaderSize;
                dctx->previousDstEnd = (char*)dst + rSize;
            }
            return rSize;
        }

    case ZSTDds_checkChecksum:
assert ( srcSize = = 4 ) ; /* guaranteed by dctx->expected */
2016-07-27 22:55:43 +00:00
{ U32 const h32 = ( U32 ) XXH64_digest ( & dctx - > xxhState ) ;
2017-09-09 21:37:28 +00:00
U32 const check32 = MEM_readLE32 ( src ) ;
2018-01-16 23:28:43 +00:00
DEBUGLOG ( 4 , " ZSTD_decompressContinue: checksum : calculated %08X :: %08X read " , h32 , check32 ) ;
2016-07-27 22:55:43 +00:00
if ( check32 ! = h32 ) return ERROR ( checksum_wrong ) ;
dctx - > expected = 0 ;
dctx - > stage = ZSTDds_getFrameHeaderSize ;
return 0 ;
}
2017-09-25 22:12:09 +00:00
2016-05-31 10:43:46 +00:00
case ZSTDds_decodeSkippableHeader :
2017-09-25 22:25:07 +00:00
assert ( src ! = NULL ) ;
assert ( srcSize < = ZSTD_skippableHeaderSize ) ;
memcpy ( dctx - > headerBuffer + ( ZSTD_skippableHeaderSize - srcSize ) , src , srcSize ) ; /* complete skippable header */
2018-08-14 19:56:21 +00:00
dctx - > expected = MEM_readLE32 ( dctx - > headerBuffer + ZSTD_FRAMEIDSIZE ) ; /* note : dctx->expected can grow seriously large, beyond local buffer size */
2017-09-25 22:25:07 +00:00
dctx - > stage = ZSTDds_skipFrame ;
return 0 ;
2017-09-25 22:12:09 +00:00
2016-05-31 10:43:46 +00:00
case ZSTDds_skipFrame :
2017-09-25 22:25:07 +00:00
dctx - > expected = 0 ;
dctx - > stage = ZSTDds_getFrameHeaderSize ;
return 0 ;
2017-09-25 22:12:09 +00:00
2015-11-25 13:42:45 +00:00
default :
return ERROR ( GENERIC ) ; /* impossible */
2015-11-11 12:43:58 +00:00
}
}
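
/*  Usage sketch for the buffer-less streaming API above (illustration only).
 *  It assumes a hypothetical readMore() callback which fills `inBuff` with exactly
 *  `toRead` bytes, and an output region `outPtr` / `outCapacity` large enough to
 *  receive the whole decoded frame :
 *
 *      ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *      size_t ret = ZSTD_decompressBegin(dctx);
 *      while (!ZSTD_isError(ret)) {
 *          size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
 *          if (toRead == 0) break;                      (frame fully decoded)
 *          readMore(inBuff, toRead);                    (hypothetical input callback)
 *          ret = ZSTD_decompressContinue(dctx, outPtr, outCapacity, inBuff, toRead);
 *          if (!ZSTD_isError(ret)) { outPtr += ret; outCapacity -= ret; }
 *      }
 *      ZSTD_freeDCtx(dctx);
 */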

static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
    dctx->dictEnd = dctx->previousDstEnd;
    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
    dctx->prefixStart = dict;
    dctx->previousDstEnd = (const char*)dict + dictSize;
    return 0;
}

/*! ZSTD_loadEntropy() :
 *  dict : must point at beginning of a valid zstd dictionary.
 * @return : size of entropy tables read */
static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy,
                               const void* const dict, size_t const dictSize)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;

    if (dictSize <= 8) return ERROR(dictionary_corrupted);
    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
    dictPtr += 8;   /* skip header = magic + dictID */

    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
                                                   dictPtr, dictEnd - dictPtr,
                                                   workspace, workspaceSize);
        if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
        dictPtr += hSize;
    }

    {   short offcodeNCount[MaxOff+1];
        U32 offcodeMaxValue = MaxOff, offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
        if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
        ZSTD_buildFSETable(entropy->OFTable,
                           offcodeNCount, offcodeMaxValue,
                           OF_base, OF_bits,
                           offcodeLog);
        dictPtr += offcodeHeaderSize;
    }

    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
        ZSTD_buildFSETable(entropy->MLTable,
                           matchlengthNCount, matchlengthMaxValue,
                           ML_base, ML_bits,
                           matchlengthLog);
        dictPtr += matchlengthHeaderSize;
    }

    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
        ZSTD_buildFSETable(entropy->LLTable,
                           litlengthNCount, litlengthMaxValue,
                           LL_base, LL_bits,
                           litlengthLog);
        dictPtr += litlengthHeaderSize;
    }

    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
    {   int i;
        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
        for (i=0; i<3; i++) {
            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
            if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
            entropy->rep[i] = rep;
    }   }

    return dictPtr - (const BYTE*)dict;
}

static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
    {   U32 const magic = MEM_readLE32(dict);
        if (magic != ZSTD_MAGIC_DICTIONARY) {
            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
    }   }
    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);

    /* load entropy tables */
    {   size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
        if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
        dict = (const char*)dict + eSize;
        dictSize -= eSize;
    }
    dctx->litEntropy = dctx->fseEntropy = 1;

    /* reference dictionary content */
    return ZSTD_refDictContent(dctx, dict, dictSize);
}

size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
{
    assert(dctx != NULL);
    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
    dctx->stage = ZSTDds_getFrameHeaderSize;
    dctx->decodedSize = 0;
    dctx->previousDstEnd = NULL;
    dctx->prefixStart = NULL;
    dctx->virtualStart = NULL;
    dctx->dictEnd = NULL;
    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
    dctx->litEntropy = dctx->fseEntropy = 0;
    dctx->dictID = 0;
    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
    memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
    dctx->LLTptr = dctx->entropy.LLTable;
    dctx->MLTptr = dctx->entropy.MLTable;
    dctx->OFTptr = dctx->entropy.OFTable;
    dctx->HUFptr = dctx->entropy.hufTable;
    return 0;
}

size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
    CHECK_F( ZSTD_decompressBegin(dctx) );
    if (dict && dictSize)
        CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
    return 0;
}
2016-09-14 14:55:44 +00:00
/* ====== ZSTD_DDict ====== */
2016-06-06 22:51:51 +00:00
struct ZSTD_DDict_s {
2016-12-21 15:44:35 +00:00
void * dictBuffer ;
const void * dictContent ;
2016-07-08 17:16:57 +00:00
size_t dictSize ;
2017-07-13 02:08:24 +00:00
ZSTD_entropyDTables_t entropy ;
2017-02-26 22:43:07 +00:00
U32 dictID ;
U32 entropyPresent ;
2017-02-27 08:27:30 +00:00
ZSTD_customMem cMem ;
2016-08-29 16:03:12 +00:00
} ; /* typedef'd to ZSTD_DDict within "zstd.h" */
2016-06-06 22:51:51 +00:00
2017-02-28 23:28:29 +00:00
static const void * ZSTD_DDictDictContent ( const ZSTD_DDict * ddict )
{
2018-09-12 22:35:21 +00:00
assert ( ddict ! = NULL ) ;
2017-02-28 23:28:29 +00:00
return ddict - > dictContent ;
}
static size_t ZSTD_DDictDictSize ( const ZSTD_DDict * ddict )
{
2018-09-12 22:35:21 +00:00
assert ( ddict ! = NULL ) ;
2017-02-28 23:28:29 +00:00
return ddict - > dictSize ;
}

size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
    assert(dctx != NULL);
    if (ddict) {
        dctx->ddictIsCold = (dctx->dictEnd != (const char*)ddict->dictContent + ddict->dictSize);
        DEBUGLOG(2, "DDict is %s",
                    dctx->ddictIsCold ? "~cold~" : "hot!");
    }
    CHECK_F( ZSTD_decompressBegin(dctx) );
    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
        dctx->dictID = ddict->dictID;
        dctx->prefixStart = ddict->dictContent;
        dctx->virtualStart = ddict->dictContent;
        dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
        dctx->previousDstEnd = dctx->dictEnd;
        if (ddict->entropyPresent) {
            dctx->litEntropy = 1;
            dctx->fseEntropy = 1;
            dctx->LLTptr = ddict->entropy.LLTable;
            dctx->MLTptr = ddict->entropy.MLTable;
            dctx->OFTptr = ddict->entropy.OFTable;
            dctx->HUFptr = ddict->entropy.hufTable;
            dctx->entropy.rep[0] = ddict->entropy.rep[0];
            dctx->entropy.rep[1] = ddict->entropy.rep[1];
            dctx->entropy.rep[2] = ddict->entropy.rep[2];
        } else {
            dctx->litEntropy = 0;
            dctx->fseEntropy = 0;
        }
    }
    return 0;
}

static size_t
ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict,
                         ZSTD_dictContentType_e dictContentType)
{
    ddict->dictID = 0;
    ddict->entropyPresent = 0;
    if (dictContentType == ZSTD_dct_rawContent) return 0;

    if (ddict->dictSize < 8) {
        if (dictContentType == ZSTD_dct_fullDict)
            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
        return 0;   /* pure content mode */
    }
    {   U32 const magic = MEM_readLE32(ddict->dictContent);
        if (magic != ZSTD_MAGIC_DICTIONARY) {
            if (dictContentType == ZSTD_dct_fullDict)
                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
            return 0;   /* pure content mode */
        }
    }
    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);

    /* load entropy tables */
    CHECK_E( ZSTD_loadEntropy(&ddict->entropy,
                              ddict->dictContent, ddict->dictSize),
             dictionary_corrupted );
    ddict->entropyPresent = 1;
    return 0;
}


static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType)
{
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
        ddict->dictBuffer = NULL;
        ddict->dictContent = dict;
        if (!dict) dictSize = 0;
    } else {
        void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
        ddict->dictBuffer = internalBuffer;
        ddict->dictContent = internalBuffer;
        if (!internalBuffer) return ERROR(memory_allocation);
        memcpy(internalBuffer, dict, dictSize);
    }
    ddict->dictSize = dictSize;
    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */

    /* parse dictionary content */
    CHECK_F( ZSTD_loadEntropy_inDDict(ddict, dictContentType) );

    return 0;
}

ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_customMem customMem)
{
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
        if (ddict == NULL) return NULL;
        ddict->cMem = customMem;
        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
                                            dict, dictSize,
                                            dictLoadMethod, dictContentType);
            if (ZSTD_isError(initResult)) {
                ZSTD_freeDDict(ddict);
                return NULL;
        }   }
        return ddict;
    }
}

/*! ZSTD_createDDict() :
 *  Create a digested dictionary, to start decompression without startup delay.
 *  `dict` content is copied inside DDict.
 *  Consequently, `dict` can be released after `ZSTD_DDict` creation */
ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
{
    ZSTD_customMem const allocator = { NULL, NULL, NULL };
    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
}

/*! ZSTD_createDDict_byReference() :
 *  Create a digested dictionary, to start decompression without startup delay.
 *  Dictionary content is simply referenced, it will be accessed during decompression.
 *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
{
    ZSTD_customMem const allocator = { NULL, NULL, NULL };
    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
}

const ZSTD_DDict* ZSTD_initStaticDDict(
                                void* sBuffer, size_t sBufferSize,
                                const void* dict, size_t dictSize,
                                ZSTD_dictLoadMethod_e dictLoadMethod,
                                ZSTD_dictContentType_e dictContentType)
{
    size_t const neededSpace = sizeof(ZSTD_DDict)
                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
    assert(sBuffer != NULL);
    assert(dict != NULL);
    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
    if (sBufferSize < neededSpace) return NULL;
    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        memcpy(ddict+1, dict, dictSize);  /* local copy */
        dict = ddict+1;
    }
    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType) ))
        return NULL;
    return ddict;
}

size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = ddict->cMem;
        ZSTD_free(ddict->dictBuffer, cMem);
        ZSTD_free(ddict, cMem);
        return 0;
    }
}

/*! ZSTD_estimateDDictSize() :
 *  Estimate amount of memory that will be needed to create a dictionary for decompression.
 *  Note : dictionaries created by reference (using ZSTD_dlm_byRef) are smaller */
size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
{
    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}
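
/*  Sizing sketch (illustration only) : the static-allocation helpers above can be
 *  combined to place a DDict into caller-owned memory, avoiding internal allocation.
 *  `dictBuf` and `dictSize` are assumed to describe a dictionary already loaded by
 *  the caller :
 *
 *      size_t const wkspSize = ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
 *      void* const wksp = malloc(wkspSize);            (any 8-byte aligned buffer works)
 *      const ZSTD_DDict* const ddict = ZSTD_initStaticDDict(wksp, wkspSize,
 *                                            dictBuf, dictSize,
 *                                            ZSTD_dlm_byCopy, ZSTD_dct_auto);
 *      (ddict == NULL means the buffer was too small or misaligned;
 *       there is no corresponding free function : just release wksp itself)
 */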

size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;   /* support sizeof on NULL */
    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0);
}

/*! ZSTD_getDictID_fromDict() :
 *  Provides the dictID stored within dictionary.
 *  if @return == 0, the dictionary is not conformant with Zstandard specification.
 *  It can still be loaded, but as a content-only dictionary. */
unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
{
    if (dictSize < 8) return 0;
    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
}

/*! ZSTD_getDictID_fromDDict() :
 *  Provides the dictID of the dictionary loaded into `ddict`.
 *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
{
    if (ddict==NULL) return 0;
    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
}

/*! ZSTD_getDictID_fromFrame() :
 *  Provides the dictID required to decompress the frame stored within `src`.
 *  If @return == 0, the dictID could not be decoded.
 *  This could be for one of the following reasons :
 *  - The frame does not require a dictionary (most common case).
 *  - The frame was built with dictID intentionally removed.
 *    The needed dictionary is then a hidden piece of information.
 *    Note : this use case also happens when using a non-conformant dictionary.
 *  - `srcSize` is too small, and as a result, the frame header could not be decoded.
 *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
 *  - This is not a Zstandard frame.
 *  When identifying the exact failure cause, it's possible to use
 *  ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
    if (ZSTD_isError(hError)) return 0;
    return zfp.dictID;
}
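
/*  Selection sketch (illustration only) : the three getters above make it possible to
 *  route a frame to the dictionary it was compressed with. `ddicts` and `nbDDicts`
 *  are assumed to describe a caller-maintained table of candidate dictionaries :
 *
 *      unsigned const frameDictID = ZSTD_getDictID_fromFrame(src, srcSize);
 *      const ZSTD_DDict* selected = NULL;
 *      size_t n;
 *      for (n = 0; n < nbDDicts; n++) {
 *          if (ZSTD_getDictID_fromDDict(ddicts[n]) == frameDictID) { selected = ddicts[n]; break; }
 *      }
 *      (frameDictID == 0 means no dictionary is required, or the ID was removed/undecodable)
 */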

/*! ZSTD_decompress_usingDDict() :
 *  Decompression using a pre-digested Dictionary
 *  Use dictionary without significant overhead. */
size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
                                  void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const ZSTD_DDict* ddict)
{
    /* pass content and size in case legacy frames are encountered */
    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
                                     NULL, 0,
                                     ddict);
}
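
/*  Round-trip sketch (illustration only) : decompressing one frame with a digested
 *  dictionary. `dictBuf`/`dictSize` and `src`/`srcSize`/`dst`/`dstCapacity` are assumed
 *  to be provided by the caller; error handling is reduced to a single check :
 *
 *      ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
 *      ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
 *      size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
 *                                                      src, srcSize, ddict);
 *      if (ZSTD_isError(dSize)) { handle error }
 *      ZSTD_freeDCtx(dctx);
 *      ZSTD_freeDDict(ddict);    (the same ddict can be reused across many frames before this)
 */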


/*=====================================
*   Streaming decompression
*=====================================*/

ZSTD_DStream* ZSTD_createDStream(void)
{
    DEBUGLOG(3, "ZSTD_createDStream");
    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
}

ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize)
{
    return ZSTD_initStaticDCtx(workspace, workspaceSize);
}

ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
{
    return ZSTD_createDCtx_advanced(customMem);
}

size_t ZSTD_freeDStream(ZSTD_DStream* zds)
{
    return ZSTD_freeDCtx(zds);
}


/* ***  Initialization  *** */

size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }

size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
                                   const void* dict, size_t dictSize,
                                         ZSTD_dictLoadMethod_e dictLoadMethod,
                                         ZSTD_dictContentType_e dictContentType)
{
    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
    ZSTD_freeDDict(dctx->ddictLocal);
    if (dict && dictSize >= 8) {
        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
        if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
    } else {
        dctx->ddictLocal = NULL;
    }
    dctx->ddict = dctx->ddictLocal;
    return 0;
}

size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}

size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}

size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
    return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
}

size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
}


/* ZSTD_initDStream_usingDict() :
 * return : expected size, aka ZSTD_frameHeaderSize_prefix.
 * this function cannot fail */
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
    DEBUGLOG(4, "ZSTD_initDStream_usingDict");
    zds->streamStage = zdss_init;
    zds->noForwardProgress = 0;
    CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
    return ZSTD_frameHeaderSize_prefix;
}

/* note : this variant can't fail */
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
    DEBUGLOG(4, "ZSTD_initDStream");
    return ZSTD_initDStream_usingDict(zds, NULL, 0);
}
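
/*  Streaming sketch (illustration only) : the classic decompression loop built on the
 *  functions above. `fin`/`fout` are assumed to be open stdio FILE* handles, and
 *  `inBuff`/`outBuff` buffers sized with ZSTD_DStreamInSize()/ZSTD_DStreamOutSize() :
 *
 *      ZSTD_DStream* const zds = ZSTD_createDStream();
 *      ZSTD_initDStream(zds);
 *      {   size_t const inSize  = ZSTD_DStreamInSize();
 *          size_t const outSize = ZSTD_DStreamOutSize();
 *          size_t readBytes;
 *          while ( (readBytes = fread(inBuff, 1, inSize, fin)) > 0 ) {
 *              ZSTD_inBuffer input = { inBuff, readBytes, 0 };
 *              while (input.pos < input.size) {
 *                  ZSTD_outBuffer output = { outBuff, outSize, 0 };
 *                  size_t const ret = ZSTD_decompressStream(zds, &output, &input);
 *                  if (ZSTD_isError(ret)) { handle error }
 *                  fwrite(outBuff, 1, output.pos, fout);
 *      }   }   }
 *      ZSTD_freeDStream(zds);
 */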

/* ZSTD_initDStream_usingDDict() :
 * ddict will just be referenced, and must outlive decompression session
 * this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
    size_t const initResult = ZSTD_initDStream(dctx);
    dctx->ddict = ddict;
    return initResult;
}

/* ZSTD_resetDStream() :
 * return : expected size, aka ZSTD_frameHeaderSize_prefix.
 * this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
    DEBUGLOG(4, "ZSTD_resetDStream");
    dctx->streamStage = zdss_loadHeader;
    dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
    dctx->legacyVersion = 0;
    dctx->hostageByte = 0;
    return ZSTD_frameHeaderSize_prefix;
}

size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx,
                                ZSTD_DStreamParameter_e paramType, unsigned paramValue)
{
    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
    switch(paramType)
    {
        default : return ERROR(parameter_unsupported);
        case DStream_p_maxWindowSize :
            DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10);
            dctx->maxWindowSize = paramValue ? paramValue : (U32)(-1);
            break;
    }
    return 0;
}

size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
    dctx->ddict = ddict;
    return 0;
}

size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
{
    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
    dctx->maxWindowSize = maxWindowSize;
    return 0;
}

size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
{
    DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format);
    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
    dctx->format = format;
    return 0;
}
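
/*  Parameter sketch (illustration only) : the setters above must be called while the
 *  context is still in its init stage, i.e. before any call that starts consuming a
 *  frame. A plausible setup for a magicless frame with a bounded window might be :
 *
 *      ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *      ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1_magicless);
 *      ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);   (reject frames needing > 128 MB)
 *      ZSTD_DCtx_refDDict(dctx, ddict);                     (optional; ddict assumed to exist)
 *      (then decompress with ZSTD_decompress_generic() or ZSTD_decompressStream())
 */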


size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
{
    return ZSTD_sizeof_DCtx(dctx);
}

size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
{
    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
    size_t const minRBSize = (size_t) neededSize;
    if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
    return minRBSize;
}
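
/*  Sizing note (illustration only) : for a frame using an 8 MB window and an unknown
 *  content size, the rolling output buffer must hold roughly
 *      windowSize + blockSize + 2*WILDCOPY_OVERLENGTH
 *  i.e. about 8 MB + 128 KB plus a small safety margin, since ZSTD_BLOCKSIZE_MAX caps
 *  blockSize at 128 KB. When frameContentSize is known and smaller, that smaller value
 *  is used instead, which is why streams made of small frames need far less memory. */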

size_t ZSTD_estimateDStreamSize(size_t windowSize)
{
    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
    size_t const inBuffSize = blockSize;  /* no block can be larger */
    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
}

size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
{
    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable */
    ZSTD_frameHeader zfh;
    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
    if (ZSTD_isError(err)) return err;
    if (err>0) return ERROR(srcSize_wrong);
    if (zfh.windowSize > windowSizeMax)
        return ERROR(frameParameter_windowTooLarge);
    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
}


/* *****   Decompression   ***** */

MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    size_t const length = MIN(dstCapacity, srcSize);
    memcpy(dst, src, length);
    return length;
}

size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    const char* const istart = (const char*)(input->src) + input->pos;
    const char* const iend = (const char*)(input->src) + input->size;
    const char* ip = istart;
    char* const ostart = (char*)(output->dst) + output->pos;
    char* const oend = (char*)(output->dst) + output->size;
    char* op = ostart;
    U32 someMoreWork = 1;

    DEBUGLOG(5, "ZSTD_decompressStream");
    if (input->pos > input->size) {  /* forbidden */
        DEBUGLOG(5, "in: pos: %u   vs size: %u",
                    (U32)input->pos, (U32)input->size);
        return ERROR(srcSize_wrong);
    }
    if (output->pos > output->size) {  /* forbidden */
        DEBUGLOG(5, "out: pos: %u   vs size: %u",
                    (U32)output->pos, (U32)output->size);
        return ERROR(dstSize_tooSmall);
    }
    DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));

    while (someMoreWork) {
        switch(zds->streamStage)
        {
        case zdss_init :
            DEBUGLOG(5, "stage zdss_init => transparent reset ");
            ZSTD_resetDStream(zds);   /* transparent reset on starting decoding a new frame */
            /* fall-through */

        case zdss_loadHeader :
            DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
            if (zds->legacyVersion) {
                /* legacy support is incompatible with static dctx */
                if (zds->staticSize) return ERROR(memory_allocation);
                {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
                    if (hint==0) zds->streamStage = zdss_init;
                    return hint;
            }   }
#endif
            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
                DEBUGLOG(5, "header size : %u", (U32)hSize);
                if (ZSTD_isError(hSize)) {
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
                    U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
                    if (legacyVersion) {
                        const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
                        size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
                        DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
                        /* legacy support is incompatible with static dctx */
                        if (zds->staticSize) return ERROR(memory_allocation);
                        CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
                                    zds->previousLegacyVersion, legacyVersion,
                                    dict, dictSize));
                        zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
                        {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
                            if (hint==0) zds->streamStage = zdss_init;   /* or stay in stage zdss_loadHeader */
                            return hint;
                    }   }
#endif
                    return hSize;   /* error */
                }
                if (hSize != 0) {   /* need more input */
                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
                    size_t const remainingInput = (size_t)(iend-ip);
                    assert(iend >= ip);
                    if (toLoad > remainingInput) {   /* not enough input to load full header */
                        if (remainingInput > 0) {
                            memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
                            zds->lhSize += remainingInput;
                        }
                        input->pos = input->size;
                        return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
                    }
                    assert(ip != NULL);
                    memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
                    break;
            }   }

            /* check for single-pass mode opportunity */
            if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
                size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
                if (cSize <= (size_t)(iend-istart)) {
                    /* shortcut : using single-pass mode */
                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
                    if (ZSTD_isError(decompressedSize)) return decompressedSize;
                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
                    ip = istart + cSize;
                    op += decompressedSize;
                    zds->expected = 0;
                    zds->streamStage = zdss_init;
                    someMoreWork = 0;
                    break;
            }   }

            /* Consume header (see ZSTDds_decodeFrameHeader) */
            DEBUGLOG(4, "Consume header");
            CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));

            if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
                zds->stage = ZSTDds_skipFrame;
            } else {
                CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
                zds->expected = ZSTD_blockHeaderSize;
                zds->stage = ZSTDds_decodeBlockHeader;
            }

            /* control buffer memory usage */
            DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
                        (U32)(zds->fParams.windowSize >> 10),
                        (U32)(zds->maxWindowSize >> 10) );
            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
            if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);

            /* Adapt buffer sizes to frame header instructions */
            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
                size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
                if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
                    size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
                    DEBUGLOG(4, "inBuff  : from %u to %u",
                                (U32)zds->inBuffSize, (U32)neededInBuffSize);
                    DEBUGLOG(4, "outBuff : from %u to %u",
                                (U32)zds->outBuffSize, (U32)neededOutBuffSize);
                    if (zds->staticSize) {  /* static DCtx */
                        DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
                        assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
                        if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
                            return ERROR(memory_allocation);
                    } else {
                        ZSTD_free(zds->inBuff, zds->customMem);
                        zds->inBuffSize = 0;
                        zds->outBuffSize = 0;
                        zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
                        if (zds->inBuff == NULL) return ERROR(memory_allocation);
                    }
                    zds->inBuffSize = neededInBuffSize;
                    zds->outBuff = zds->inBuff + zds->inBuffSize;
                    zds->outBuffSize = neededOutBuffSize;
            }   }
            zds->streamStage = zdss_read;
            /* fall-through */

        case zdss_read:
            DEBUGLOG(5, "stage zdss_read");
            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
                DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
                if (neededInSize==0) {  /* end of frame */
                    zds->streamStage = zdss_init;
                    someMoreWork = 0;
                    break;
                }
                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
                    int const isSkipFrame = ZSTD_isSkipFrame(zds);
                    size_t const decodedSize = ZSTD_decompressContinue(zds,
                        zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
                        ip, neededInSize);
                    if (ZSTD_isError(decodedSize)) return decodedSize;
                    ip += neededInSize;
                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */
                    zds->outEnd = zds->outStart + decodedSize;
                    zds->streamStage = zdss_flush;
                    break;
            }   }
            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
            zds->streamStage = zdss_load;
            /* fall-through */

        case zdss_load:
            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
                size_t const toLoad = neededInSize - zds->inPos;
                int const isSkipFrame = ZSTD_isSkipFrame(zds);
                size_t loadedSize;
                if (isSkipFrame) {
                    loadedSize = MIN(toLoad, (size_t)(iend-ip));
                } else {
                    if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected);   /* should never happen */
                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
                }
                ip += loadedSize;
                zds->inPos += loadedSize;
                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */

                /* decode loaded input */
                {   size_t const decodedSize = ZSTD_decompressContinue(zds,
                        zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
                        zds->inBuff, neededInSize);
                    if (ZSTD_isError(decodedSize)) return decodedSize;
                    zds->inPos = 0;   /* input is consumed */
                    if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; }   /* this was just a header */
                    zds->outEnd = zds->outStart + decodedSize;
            }   }
            zds->streamStage = zdss_flush;
            /* fall-through */

        case zdss_flush:
            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
                size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
                op += flushedSize;
                zds->outStart += flushedSize;
                if (flushedSize == toFlushSize) {  /* flush completed */
                    zds->streamStage = zdss_read;
                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)
                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
                        DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
                                (int)(zds->outBuffSize - zds->outStart),
                                (U32)zds->fParams.blockSizeMax);
                        zds->outStart = zds->outEnd = 0;
                    }
                    break;
            }   }
            /* cannot complete flush */
            someMoreWork = 0;
            break;

        default: return ERROR(GENERIC);   /* impossible */
    }   }

    /* result */
    input->pos = (size_t)(ip - (const char*)(input->src));
    output->pos = (size_t)(op - (char*)(output->dst));
    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
        zds->noForwardProgress ++;
        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
            if (op==oend) return ERROR(dstSize_tooSmall);
            if (ip==iend) return ERROR(srcSize_wrong);
            assert(0);
        }
    } else {
        zds->noForwardProgress = 0;
    }
    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
        if (!nextSrcSizeHint) {   /* frame fully decoded */
            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
                if (zds->hostageByte) {
                    if (input->pos >= input->size) {
                        /* can't release hostage (not present) */
                        zds->streamStage = zdss_read;
                        return 1;
                    }
                    input->pos++;  /* release hostage */
                }   /* zds->hostageByte */
                return 0;
            }  /* zds->outEnd == zds->outStart */
            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
                input->pos--;   /* note : pos > 0, otherwise, impossible to finish reading last block */
                zds->hostageByte = 1;
            }
            return 1;
        }  /* nextSrcSizeHint==0 */
        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */
        assert(zds->inPos <= nextSrcSizeHint);
        nextSrcSizeHint -= zds->inPos;   /* part already loaded */
        return nextSrcSizeHint;
    }
}
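/* Illustrative sketch (not part of the library) :
 * a hypothetical caller loop showing how the no-forward-progress protection
 * above surfaces as an error. The function name and scenario are assumptions
 * for illustration only; ZSTD_decompressStream() and ZSTD_isError()
 * are the only library symbols used. */
#if 0   /* example only, not compiled */
static size_t example_drain_with_full_output(ZSTD_DCtx* dctx,
                                             ZSTD_outBuffer* output,
                                             ZSTD_inBuffer* input)
{
    /* If output->pos == output->size while input still holds data,
     * each call below reads nothing and writes nothing.
     * After ZSTD_NO_FORWARD_PROGRESS_MAX such calls (16 by default),
     * ZSTD_decompressStream() returns an error (dstSize_tooSmall here)
     * instead of letting this loop spin forever. */
    for (;;) {
        size_t const hint = ZSTD_decompressStream(dctx, output, input);
        if (ZSTD_isError(hint)) return hint;   /* error surfaces here */
        if (hint == 0) return 0;               /* frame fully decoded and flushed */
    }
}
#endif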
size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    return ZSTD_decompressStream(dctx, output, input);
}
size_t ZSTD_decompress_generic_simpleArgs (
                            ZSTD_DCtx* dctx,
                            void* dst, size_t dstCapacity, size_t* dstPos,
                      const void* src, size_t srcSize, size_t* srcPos)
{
    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
    /* ZSTD_decompress_generic() will check validity of dstPos and srcPos */
    size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input);
    *dstPos = output.pos;
    *srcPos = input.pos;
    return cErr;
}
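/* Illustrative sketch (not part of the library) :
 * one possible way to call ZSTD_decompress_generic_simpleArgs(),
 * tracking positions in plain size_t variables instead of buffer structs.
 * The function name and buffer handling are assumptions for illustration only. */
#if 0   /* example only, not compiled */
static size_t example_simpleArgs_call(ZSTD_DCtx* dctx,
                                      void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
{
    size_t dstPos = 0;   /* updated by the call */
    size_t srcPos = 0;   /* updated by the call */
    size_t const hint = ZSTD_decompress_generic_simpleArgs(dctx,
                            dst, dstCapacity, &dstPos,
                            src, srcSize, &srcPos);
    if (ZSTD_isError(hint)) return hint;
    /* dstPos bytes were written into dst, srcPos bytes were consumed from src;
     * hint follows the same convention as ZSTD_decompressStream() (0 == frame done). */
    return hint;
}
#endif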
void ZSTD_DCtx_reset(ZSTD_DCtx* dctx)
{
    (void)ZSTD_initDStream(dctx);
    dctx->format = ZSTD_f_zstd1;
    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
}
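/* Illustrative sketch (not part of the library) :
 * reusing one DCtx across independent frames, resetting it in between.
 * The helper name and the assumption that each frame decodes in a single call
 * are for illustration only; ZSTD_DCtx_reset() here takes only the context
 * and restores the default format and window-size limit, as shown above. */
#if 0   /* example only, not compiled */
static size_t example_decompress_two_frames(ZSTD_DCtx* dctx,
                                            ZSTD_outBuffer* out1, ZSTD_inBuffer* in1,
                                            ZSTD_outBuffer* out2, ZSTD_inBuffer* in2)
{
    size_t ret = ZSTD_decompressStream(dctx, out1, in1);   /* first frame (assumed to fit in one call) */
    if (ZSTD_isError(ret)) return ret;
    ZSTD_DCtx_reset(dctx);                                 /* back to a clean default state */
    ret = ZSTD_decompressStream(dctx, out2, in2);          /* second, unrelated frame */
    return ret;
}
#endif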