zstd/lib/compress/zstd_compress.c

/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
* Dependencies
***************************************/
#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/cpu.h"
#include "../common/mem.h"
#include "hist.h" /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
#include "zstd_fast.h"
#include "zstd_double_fast.h"
#include "zstd_lazy.h"
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
/*-*************************************
* Helper functions
***************************************/
/* ZSTD_compressBound()
* Note that the result from this function is only compatible with the "normal"
* full-block strategy.
* When there are a lot of small blocks due to frequent flush in streaming mode
the overhead of headers can make the compressed data larger than the
* return value of ZSTD_compressBound().
*/
size_t ZSTD_compressBound(size_t srcSize) {
return ZSTD_COMPRESSBOUND(srcSize);
}
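/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * sizing a destination buffer for one-shot compression with the public
 * zstd.h API.
 *
 *   #include <stdlib.h>
 *   #include <zstd.h>
 *
 *   void* compress_copy(const void* src, size_t srcSize, size_t* cSizePtr)
 *   {
 *       size_t const dstCapacity = ZSTD_compressBound(srcSize);  // worst-case size for full blocks
 *       void* const dst = malloc(dstCapacity);
 *       if (dst == NULL) return NULL;
 *       {   size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
 *           if (ZSTD_isError(cSize)) { free(dst); return NULL; }
 *           *cSizePtr = cSize;
 *       }
 *       return dst;
 *   }
 *
 * As noted above, this bound does not cover streaming with frequent flushes,
 * where per-block header overhead can exceed it.
 */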
/*-*************************************
* Context memory management
***************************************/
struct ZSTD_CDict_s {
const void* dictContent;
size_t dictContentSize;
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
ZSTD_cwksp workspace;
ZSTD_matchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
ZSTD_customMem customMem;
U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
}; /* typedef'd to ZSTD_CDict within "zstd.h" */
ZSTD_CCtx* ZSTD_createCCtx(void)
{
return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
assert(cctx != NULL);
ZSTD_memset(cctx, 0, sizeof(*cctx));
cctx->customMem = memManager;
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
{ size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
assert(!ZSTD_isError(err));
(void)err;
}
}
ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
{ ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
if (!cctx) return NULL;
ZSTD_initCCtx(cctx, customMem);
return cctx;
}
}
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
ZSTD_cwksp ws;
ZSTD_CCtx* cctx;
if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
ZSTD_cwksp_init(&ws, workspace, workspaceSize);
cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
if (cctx == NULL) return NULL;
ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
/* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
}
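/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * carving a CCtx out of a caller-provided buffer. ZSTD_initStaticCCtx() and
 * ZSTD_estimateCCtxSize() are advanced declarations and require
 * ZSTD_STATIC_LINKING_ONLY.
 *
 *   #define ZSTD_STATIC_LINKING_ONLY
 *   #include <stdlib.h>
 *   #include <zstd.h>
 *
 *   ZSTD_CCtx* make_static_cctx(int level, void** bufferOut)
 *   {
 *       size_t const wkspSize = ZSTD_estimateCCtxSize(level);
 *       void* const wksp = malloc(wkspSize);          // must stay valid and 8-aligned for the cctx lifetime
 *       ZSTD_CCtx* const cctx = wksp ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;
 *       if (cctx == NULL) { free(wksp); return NULL; }
 *       *bufferOut = wksp;                            // caller frees this buffer; never call ZSTD_freeCCtx() on a static cctx
 *       return cctx;
 *   }
 */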
/**
* Clears and frees all of the dictionaries in the CCtx.
*/
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
ZSTD_freeCDict(cctx->localDict.cdict);
ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
cctx->cdict = NULL;
}
static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
{
size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
return bufferSize + cdictSize;
}
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
assert(cctx != NULL);
assert(cctx->staticSize == 0);
ZSTD_clearAllDicts(cctx);
#ifdef ZSTD_MULTITHREAD
ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"not compatible with static CCtx");
{
int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
ZSTD_freeCCtxContent(cctx);
if (!cctxInWorkspace) {
ZSTD_customFree(cctx, cctx->customMem);
}
}
return 0;
}
static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
return ZSTDMT_sizeof_CCtx(cctx->mtctx);
#else
(void)cctx;
return 0;
#endif
}
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support sizeof on NULL */
/* cctx may be in the workspace */
return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ ZSTD_cwksp_sizeof(&cctx->workspace)
+ ZSTD_sizeof_localDict(cctx->localDict)
+ ZSTD_sizeof_mtctx(cctx);
}
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
return ZSTD_sizeof_CCtx(zcs); /* same object */
}
/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
ZSTD_compressionParameters cParams)
{
ZSTD_CCtx_params cctxParams;
ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
cctxParams.cParams = cParams;
cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
assert(!ZSTD_checkCParams(cParams));
cctxParams.fParams.contentSizeFlag = 1;
return cctxParams;
}
static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
ZSTD_customMem customMem)
{
ZSTD_CCtx_params* params;
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
sizeof(ZSTD_CCtx_params), customMem);
if (!params) { return NULL; }
params->customMem = customMem;
params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
params->fParams.contentSizeFlag = 1;
return params;
}
ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
{
return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
}
size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
{
if (params == NULL) { return 0; }
ZSTD_customFree(params, params->customMem);
return 0;
}
size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
{
return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
}
size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->compressionLevel = compressionLevel;
cctxParams->fParams.contentSizeFlag = 1;
return 0;
}
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
assert(!ZSTD_checkCParams(params.cParams));
cctxParams->cParams = params.cParams;
cctxParams->fParams = params.fParams;
cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
return 0;
}
/* ZSTD_assignParamsToCCtxParams() :
* params is presumed valid at this stage */
static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
{
ZSTD_CCtx_params ret = *cctxParams;
assert(!ZSTD_checkCParams(params->cParams));
ret.cParams = params->cParams;
ret.fParams = params->fParams;
ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
return ret;
}
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
ZSTD_bounds bounds = { 0, 0, 0 };
switch(param)
{
case ZSTD_c_compressionLevel:
bounds.lowerBound = ZSTD_minCLevel();
bounds.upperBound = ZSTD_maxCLevel();
return bounds;
case ZSTD_c_windowLog:
bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
bounds.upperBound = ZSTD_WINDOWLOG_MAX;
return bounds;
case ZSTD_c_hashLog:
bounds.lowerBound = ZSTD_HASHLOG_MIN;
bounds.upperBound = ZSTD_HASHLOG_MAX;
return bounds;
case ZSTD_c_chainLog:
bounds.lowerBound = ZSTD_CHAINLOG_MIN;
bounds.upperBound = ZSTD_CHAINLOG_MAX;
return bounds;
case ZSTD_c_searchLog:
bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
bounds.upperBound = ZSTD_SEARCHLOG_MAX;
return bounds;
case ZSTD_c_minMatch:
bounds.lowerBound = ZSTD_MINMATCH_MIN;
bounds.upperBound = ZSTD_MINMATCH_MAX;
return bounds;
case ZSTD_c_targetLength:
bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
return bounds;
case ZSTD_c_strategy:
bounds.lowerBound = ZSTD_STRATEGY_MIN;
bounds.upperBound = ZSTD_STRATEGY_MAX;
return bounds;
case ZSTD_c_contentSizeFlag:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_checksumFlag:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_dictIDFlag:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_nbWorkers:
bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
#else
bounds.upperBound = 0;
#endif
return bounds;
case ZSTD_c_jobSize:
bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
#else
bounds.upperBound = 0;
#endif
return bounds;
case ZSTD_c_overlapLog:
#ifdef ZSTD_MULTITHREAD
bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
#else
bounds.lowerBound = 0;
bounds.upperBound = 0;
#endif
return bounds;
case ZSTD_c_enableDedicatedDictSearch:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_enableLongDistanceMatching:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_ldmHashLog:
bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
return bounds;
case ZSTD_c_ldmMinMatch:
bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
return bounds;
case ZSTD_c_ldmBucketSizeLog:
bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
return bounds;
case ZSTD_c_ldmHashRateLog:
bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
return bounds;
/* experimental parameters */
case ZSTD_c_rsyncable:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_forceMaxWindow :
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_format:
ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
bounds.lowerBound = ZSTD_f_zstd1;
bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
return bounds;
case ZSTD_c_forceAttachDict:
ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
bounds.lowerBound = ZSTD_dictDefaultAttach;
bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
return bounds;
case ZSTD_c_literalCompressionMode:
ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
bounds.lowerBound = ZSTD_lcm_auto;
bounds.upperBound = ZSTD_lcm_uncompressed;
return bounds;
case ZSTD_c_targetCBlockSize:
bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
return bounds;
case ZSTD_c_srcSizeHint:
bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
return bounds;
default:
bounds.error = ERROR(parameter_unsupported);
return bounds;
}
}
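/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * querying a parameter's valid range before setting it, using the public
 * zstd.h API.
 *
 *   #include <zstd.h>
 *
 *   int clamp_window_log(int requested)
 *   {
 *       ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_windowLog);
 *       if (ZSTD_isError(b.error)) return 0;          // 0 => let the library pick a default
 *       if (requested < b.lowerBound) return b.lowerBound;
 *       if (requested > b.upperBound) return b.upperBound;
 *       return requested;
 *   }
 */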
/* ZSTD_cParam_clampBounds:
* Clamps the value into the bounded range.
*/
static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
{
ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
if (ZSTD_isError(bounds.error)) return bounds.error;
if (*value < bounds.lowerBound) *value = bounds.lowerBound;
if (*value > bounds.upperBound) *value = bounds.upperBound;
return 0;
}
#define BOUNDCHECK(cParam, val) { \
RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
parameter_outOfBound, "Param out of bounds"); \
}
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
switch(param)
{
case ZSTD_c_compressionLevel:
case ZSTD_c_hashLog:
case ZSTD_c_chainLog:
case ZSTD_c_searchLog:
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
return 1;
case ZSTD_c_format:
case ZSTD_c_windowLog:
case ZSTD_c_contentSizeFlag:
case ZSTD_c_checksumFlag:
case ZSTD_c_dictIDFlag:
case ZSTD_c_forceMaxWindow :
case ZSTD_c_nbWorkers:
case ZSTD_c_jobSize:
case ZSTD_c_overlapLog:
case ZSTD_c_rsyncable:
case ZSTD_c_enableDedicatedDictSearch:
case ZSTD_c_enableLongDistanceMatching:
case ZSTD_c_ldmHashLog:
case ZSTD_c_ldmMinMatch:
case ZSTD_c_ldmBucketSizeLog:
case ZSTD_c_ldmHashRateLog:
case ZSTD_c_forceAttachDict:
case ZSTD_c_literalCompressionMode:
case ZSTD_c_targetCBlockSize:
case ZSTD_c_srcSizeHint:
default:
return 0;
}
}
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
if (cctx->streamStage != zcss_init) {
if (ZSTD_isUpdateAuthorized(param)) {
cctx->cParamsChanged = 1;
} else {
RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
} }
switch(param)
{
case ZSTD_c_nbWorkers:
RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
"MT not compatible with static alloc");
break;
case ZSTD_c_compressionLevel:
case ZSTD_c_windowLog:
case ZSTD_c_hashLog:
case ZSTD_c_chainLog:
case ZSTD_c_searchLog:
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
case ZSTD_c_ldmHashRateLog:
case ZSTD_c_format:
case ZSTD_c_contentSizeFlag:
case ZSTD_c_checksumFlag:
case ZSTD_c_dictIDFlag:
case ZSTD_c_forceMaxWindow:
case ZSTD_c_forceAttachDict:
case ZSTD_c_literalCompressionMode:
case ZSTD_c_jobSize:
case ZSTD_c_overlapLog:
case ZSTD_c_rsyncable:
case ZSTD_c_enableDedicatedDictSearch:
case ZSTD_c_enableLongDistanceMatching:
case ZSTD_c_ldmHashLog:
case ZSTD_c_ldmMinMatch:
case ZSTD_c_ldmBucketSizeLog:
case ZSTD_c_targetCBlockSize:
case ZSTD_c_srcSizeHint:
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}
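/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * configuring a context through the public parameter API, then compressing
 * with ZSTD_compress2().
 *
 *   #include <zstd.h>
 *
 *   size_t compress_with_params(void* dst, size_t dstCap, const void* src, size_t srcSize)
 *   {
 *       ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *       size_t cSize;
 *       if (cctx == NULL) return 0;
 *       ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
 *       ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
 *       ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   // rejected if built without ZSTD_MULTITHREAD
 *       cSize = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
 *       ZSTD_freeCCtx(cctx);
 *       return cSize;                                        // may be an error code; check with ZSTD_isError()
 *   }
 */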
size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
ZSTD_cParameter param, int value)
{
DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
switch(param)
{
case ZSTD_c_format :
BOUNDCHECK(ZSTD_c_format, value);
CCtxParams->format = (ZSTD_format_e)value;
return (size_t)CCtxParams->format;
case ZSTD_c_compressionLevel : {
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
if (value == 0)
CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
else
CCtxParams->compressionLevel = value;
if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
return 0; /* return type (size_t) cannot represent negative values */
}
case ZSTD_c_windowLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_windowLog, value);
CCtxParams->cParams.windowLog = (U32)value;
return CCtxParams->cParams.windowLog;
case ZSTD_c_hashLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_hashLog, value);
CCtxParams->cParams.hashLog = (U32)value;
return CCtxParams->cParams.hashLog;
case ZSTD_c_chainLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_chainLog, value);
CCtxParams->cParams.chainLog = (U32)value;
return CCtxParams->cParams.chainLog;
case ZSTD_c_searchLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_searchLog, value);
CCtxParams->cParams.searchLog = (U32)value;
return (size_t)value;
case ZSTD_c_minMatch :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_minMatch, value);
CCtxParams->cParams.minMatch = value;
return CCtxParams->cParams.minMatch;
case ZSTD_c_targetLength :
BOUNDCHECK(ZSTD_c_targetLength, value);
CCtxParams->cParams.targetLength = value;
return CCtxParams->cParams.targetLength;
case ZSTD_c_strategy :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_strategy, value);
CCtxParams->cParams.strategy = (ZSTD_strategy)value;
return (size_t)CCtxParams->cParams.strategy;
case ZSTD_c_contentSizeFlag :
/* Content size written in frame header _when known_ (default:1) */
DEBUGLOG(4, "set content size flag = %u", (value!=0));
CCtxParams->fParams.contentSizeFlag = value != 0;
return CCtxParams->fParams.contentSizeFlag;
case ZSTD_c_checksumFlag :
/* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
CCtxParams->fParams.checksumFlag = value != 0;
return CCtxParams->fParams.checksumFlag;
case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
CCtxParams->fParams.noDictIDFlag = !value;
return !CCtxParams->fParams.noDictIDFlag;
case ZSTD_c_forceMaxWindow :
CCtxParams->forceWindow = (value != 0);
return CCtxParams->forceWindow;
case ZSTD_c_forceAttachDict : {
const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
CCtxParams->attachDictPref = pref;
return CCtxParams->attachDictPref;
}
case ZSTD_c_literalCompressionMode : {
const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
CCtxParams->literalCompressionMode = lcm;
return CCtxParams->literalCompressionMode;
}
case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
#else
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
CCtxParams->nbWorkers = value;
return CCtxParams->nbWorkers;
#endif
case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
#else
/* Adjust to the minimum non-default value. */
if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
value = ZSTDMT_JOBSIZE_MIN;
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
assert(value >= 0);
CCtxParams->jobSize = value;
return CCtxParams->jobSize;
#endif
case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
#else
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
CCtxParams->overlapLog = value;
return CCtxParams->overlapLog;
#endif
case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
#else
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
CCtxParams->rsyncable = value;
return CCtxParams->rsyncable;
#endif
case ZSTD_c_enableDedicatedDictSearch :
CCtxParams->enableDedicatedDictSearch = (value!=0);
return CCtxParams->enableDedicatedDictSearch;
case ZSTD_c_enableLongDistanceMatching :
CCtxParams->ldmParams.enableLdm = (value!=0);
return CCtxParams->ldmParams.enableLdm;
case ZSTD_c_ldmHashLog :
if (value!=0) /* 0 ==> auto */
BOUNDCHECK(ZSTD_c_ldmHashLog, value);
CCtxParams->ldmParams.hashLog = value;
return CCtxParams->ldmParams.hashLog;
case ZSTD_c_ldmMinMatch :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
CCtxParams->ldmParams.minMatchLength = value;
return CCtxParams->ldmParams.minMatchLength;
case ZSTD_c_ldmBucketSizeLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
CCtxParams->ldmParams.bucketSizeLog = value;
return CCtxParams->ldmParams.bucketSizeLog;
case ZSTD_c_ldmHashRateLog :
RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
parameter_outOfBound, "Param out of bounds!");
CCtxParams->ldmParams.hashRateLog = value;
return CCtxParams->ldmParams.hashRateLog;
case ZSTD_c_targetCBlockSize :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
CCtxParams->targetCBlockSize = value;
return CCtxParams->targetCBlockSize;
case ZSTD_c_srcSizeHint :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_srcSizeHint, value);
CCtxParams->srcSizeHint = value;
return CCtxParams->srcSizeHint;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
}
size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
{
return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}
size_t ZSTD_CCtxParams_getParameter(
ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
{
switch(param)
{
case ZSTD_c_format :
*value = CCtxParams->format;
break;
case ZSTD_c_compressionLevel :
*value = CCtxParams->compressionLevel;
break;
case ZSTD_c_windowLog :
*value = (int)CCtxParams->cParams.windowLog;
break;
case ZSTD_c_hashLog :
*value = (int)CCtxParams->cParams.hashLog;
break;
case ZSTD_c_chainLog :
*value = (int)CCtxParams->cParams.chainLog;
break;
case ZSTD_c_searchLog :
*value = CCtxParams->cParams.searchLog;
break;
case ZSTD_c_minMatch :
*value = CCtxParams->cParams.minMatch;
break;
case ZSTD_c_targetLength :
*value = CCtxParams->cParams.targetLength;
break;
case ZSTD_c_strategy :
*value = (unsigned)CCtxParams->cParams.strategy;
break;
case ZSTD_c_contentSizeFlag :
*value = CCtxParams->fParams.contentSizeFlag;
break;
case ZSTD_c_checksumFlag :
*value = CCtxParams->fParams.checksumFlag;
break;
case ZSTD_c_dictIDFlag :
*value = !CCtxParams->fParams.noDictIDFlag;
break;
case ZSTD_c_forceMaxWindow :
*value = CCtxParams->forceWindow;
break;
case ZSTD_c_forceAttachDict :
*value = CCtxParams->attachDictPref;
break;
case ZSTD_c_literalCompressionMode :
*value = CCtxParams->literalCompressionMode;
break;
case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
assert(CCtxParams->nbWorkers == 0);
#endif
*value = CCtxParams->nbWorkers;
break;
case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
assert(CCtxParams->jobSize <= INT_MAX);
*value = (int)CCtxParams->jobSize;
break;
#endif
case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
*value = CCtxParams->overlapLog;
break;
#endif
case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
*value = CCtxParams->rsyncable;
break;
#endif
case ZSTD_c_enableDedicatedDictSearch :
*value = CCtxParams->enableDedicatedDictSearch;
break;
case ZSTD_c_enableLongDistanceMatching :
*value = CCtxParams->ldmParams.enableLdm;
break;
case ZSTD_c_ldmHashLog :
*value = CCtxParams->ldmParams.hashLog;
break;
case ZSTD_c_ldmMinMatch :
*value = CCtxParams->ldmParams.minMatchLength;
break;
case ZSTD_c_ldmBucketSizeLog :
*value = CCtxParams->ldmParams.bucketSizeLog;
break;
case ZSTD_c_ldmHashRateLog :
*value = CCtxParams->ldmParams.hashRateLog;
break;
case ZSTD_c_targetCBlockSize :
*value = (int)CCtxParams->targetCBlockSize;
break;
case ZSTD_c_srcSizeHint :
*value = (int)CCtxParams->srcSizeHint;
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return 0;
}
/** ZSTD_CCtx_setParametersUsingCCtxParams() :
* just applies `params` into `cctx`
* no action is performed, parameters are merely stored.
* If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
* This is possible even if a compression is ongoing.
* In which case, new parameters will be applied on the fly, starting with next compression job.
*/
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"The context is in the wrong stage!");
RETURN_ERROR_IF(cctx->cdict, stage_wrong,
"Can't override parameters with cdict attached (some must "
"be inherited from the cdict).");
cctx->requestedParams = *params;
return 0;
}
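/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * staging parameters in a ZSTD_CCtx_params object and applying them in one
 * call. These declarations are advanced and require ZSTD_STATIC_LINKING_ONLY.
 *
 *   #define ZSTD_STATIC_LINKING_ONLY
 *   #include <zstd.h>
 *
 *   int apply_preset(ZSTD_CCtx* cctx)
 *   {
 *       ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
 *       size_t err;
 *       if (p == NULL) return 1;
 *       ZSTD_CCtxParams_setParameter(p, ZSTD_c_compressionLevel, 12);
 *       ZSTD_CCtxParams_setParameter(p, ZSTD_c_enableLongDistanceMatching, 1);
 *       err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, p);
 *       ZSTD_freeCCtxParams(p);
 *       return ZSTD_isError(err) ? 1 : 0;
 *   }
 */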
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't set pledgedSrcSize when not in init stage.");
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
return 0;
}
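/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * announcing the total input size before a streaming compression so the
 * frame header can carry it.
 *
 *   #include <zstd.h>
 *
 *   size_t begin_frame(ZSTD_CCtx* cctx, unsigned long long totalSrcSize)
 *   {
 *       size_t err = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *       if (ZSTD_isError(err)) return err;
 *       err = ZSTD_CCtx_setPledgedSrcSize(cctx, totalSrcSize);
 *       if (ZSTD_isError(err)) return err;
 *       // ... then feed data with ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_continue)
 *       // and finish with ZSTD_e_end; the pledged size is checked against actual input.
 *       return 0;
 *   }
 */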
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
int const compressionLevel,
unsigned long long srcSizeHint,
size_t const dictSize);
static int ZSTD_dedicatedDictSearch_isSupported(
int const compressionLevel,
unsigned long long srcSizeHint,
size_t const dictSize);
static void ZSTD_dedicatedDictSearch_revertCParams(
ZSTD_compressionParameters* cParams);
/**
* Initializes the local dict using the requested parameters.
* NOTE: This does not use the pledged src size, because it may be used for more
* than one compression.
*/
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
ZSTD_localDict* const dl = &cctx->localDict;
if (dl->dict == NULL) {
/* No local dictionary. */
assert(dl->dictBuffer == NULL);
assert(dl->cdict == NULL);
assert(dl->dictSize == 0);
return 0;
}
if (dl->cdict != NULL) {
assert(cctx->cdict == dl->cdict);
/* Local dictionary already initialized. */
return 0;
}
assert(dl->dictSize > 0);
assert(cctx->cdict == NULL);
assert(cctx->prefixDict.dict == NULL);
dl->cdict = ZSTD_createCDict_advanced2(
dl->dict,
dl->dictSize,
ZSTD_dlm_byRef,
dl->dictContentType,
&cctx->requestedParams,
cctx->customMem);
RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
cctx->cdict = dl->cdict;
return 0;
}
size_t ZSTD_CCtx_loadDictionary_advanced(
ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't load a dictionary when ctx is not in init stage.");
DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
ZSTD_clearAllDicts(cctx); /* in case one already exists */
if (dict == NULL || dictSize == 0) /* no dictionary mode */
return 0;
if (dictLoadMethod == ZSTD_dlm_byRef) {
cctx->localDict.dict = dict;
} else {
void* dictBuffer;
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"no malloc for static CCtx");
dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
ZSTD_memcpy(dictBuffer, dict, dictSize);
cctx->localDict.dictBuffer = dictBuffer;
cctx->localDict.dict = dictBuffer;
}
cctx->localDict.dictSize = dictSize;
cctx->localDict.dictContentType = dictContentType;
return 0;
}
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
return ZSTD_CCtx_loadDictionary_advanced(
cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
return ZSTD_CCtx_loadDictionary_advanced(
cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
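/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * loading a dictionary once and reusing it across several frames on the same
 * context. The dictionary stays attached until parameters are reset or
 * another dictionary is loaded.
 *
 *   #include <zstd.h>
 *
 *   size_t compress_with_dict(ZSTD_CCtx* cctx,
 *                             void* dst, size_t dstCap,
 *                             const void* src, size_t srcSize,
 *                             const void* dict, size_t dictSize)
 *   {
 *       size_t const err = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);  // copies the dict content
 *       if (ZSTD_isError(err)) return err;
 *       return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
 *   }
 */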
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a dict when ctx not in init stage.");
/* Free the existing local cdict (if any) to save memory. */
ZSTD_clearAllDicts(cctx);
cctx->cdict = cdict;
return 0;
}
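/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * digesting a dictionary once into a CDict and referencing it from many
 * contexts or frames.
 *
 *   #include <zstd.h>
 *
 *   size_t compress_with_cdict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict,
 *                              void* dst, size_t dstCap,
 *                              const void* src, size_t srcSize)
 *   {
 *       size_t const err = ZSTD_CCtx_refCDict(cctx, cdict);  // cdict must outlive its use here
 *       if (ZSTD_isError(err)) return err;
 *       return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
 *   }
 *
 *   // A CDict is typically created once, e.g. with
 *   //   ZSTD_CDict* cdict = ZSTD_createCDict(dictBuffer, dictSize, level);
 *   // and released with ZSTD_freeCDict(cdict) when no longer needed.
 */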
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}
size_t ZSTD_CCtx_refPrefix_advanced(
ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a prefix when ctx not in init stage.");
ZSTD_clearAllDicts(cctx);
if (prefix != NULL && prefixSize > 0) {
cctx->prefixDict.dict = prefix;
cctx->prefixDict.dictSize = prefixSize;
cctx->prefixDict.dictContentType = dictContentType;
}
return 0;
}
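/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * using a single-use prefix as lightweight dictionary content. The prefix
 * only applies to the next frame and must remain readable until that frame
 * is done; decompression must reference the same prefix.
 *
 *   #include <zstd.h>
 *
 *   size_t compress_delta(ZSTD_CCtx* cctx,
 *                         void* dst, size_t dstCap,
 *                         const void* newVersion, size_t newSize,
 *                         const void* oldVersion, size_t oldSize)
 *   {
 *       size_t const err = ZSTD_CCtx_refPrefix(cctx, oldVersion, oldSize);
 *       if (ZSTD_isError(err)) return err;
 *       return ZSTD_compress2(cctx, dst, dstCap, newVersion, newSize);
 *   }
 */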
/*! ZSTD_CCtx_reset() :
* Also dumps dictionary */
size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
{
if ( (reset == ZSTD_reset_session_only)
|| (reset == ZSTD_reset_session_and_parameters) ) {
cctx->streamStage = zcss_init;
cctx->pledgedSrcSizePlusOne = 0;
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't reset parameters only when not in init stage.");
ZSTD_clearAllDicts(cctx);
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
return 0;
}
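/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * reusing one context for many frames while keeping the configured
 * parameters.
 *
 *   #include <zstd.h>
 *
 *   size_t compress_next_frame(ZSTD_CCtx* cctx,
 *                              void* dst, size_t dstCap,
 *                              const void* src, size_t srcSize)
 *   {
 *       // Abandon any unfinished frame but keep parameters and dictionary.
 *       size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
 *       if (ZSTD_isError(err)) return err;
 *       return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
 *   }
 *
 * ZSTD_reset_session_and_parameters additionally drops parameters and any
 * loaded or referenced dictionary, as implemented above.
 */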
/** ZSTD_checkCParams() :
ensure CParam values remain within the authorized range.
@return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog);
BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog);
BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
return 0;
}
/** ZSTD_clampCParams() :
* make CParam values within valid range.
* @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
# define CLAMP_TYPE(cParam, val, type) { \
ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
}
# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
CLAMP(ZSTD_c_windowLog, cParams.windowLog);
CLAMP(ZSTD_c_chainLog, cParams.chainLog);
CLAMP(ZSTD_c_hashLog, cParams.hashLog);
CLAMP(ZSTD_c_searchLog, cParams.searchLog);
CLAMP(ZSTD_c_minMatch, cParams.minMatch);
CLAMP(ZSTD_c_targetLength,cParams.targetLength);
CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
return cParams;
}
/** ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
return hashLog - btScale;
}
/** ZSTD_adjustCParams_internal() :
* optimize `cPar` for a specified input (`srcSize` and `dictSize`).
* mostly downsize to reduce memory consumption and initialization latency.
* `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
* note : `srcSize==0` means 0!
* condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize)
{
static const U64 minSrcSize = 513; /* (1<<9) + 1 */
static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
srcSize = minSrcSize;
/* resize windowLog if input is small enough, to use less memory */
if ( (srcSize < maxWindowResize)
&& (dictSize < maxWindowResize) ) {
U32 const tSize = (U32)(srcSize + dictSize);
static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
ZSTD_highbit32(tSize-1) + 1;
if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
}
if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
{ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
if (cycleLog > cPar.windowLog)
cPar.chainLog -= (cycleLog - cPar.windowLog);
}
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
return cPar;
}
ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize)
{
cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
}
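/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * shrinking default cParams for a small known input. ZSTD_getCParams() and
 * ZSTD_adjustCParams() are advanced declarations and require
 * ZSTD_STATIC_LINKING_ONLY.
 *
 *   #define ZSTD_STATIC_LINKING_ONLY
 *   #include <zstd.h>
 *
 *   ZSTD_compressionParameters pick_cparams(int level, unsigned long long srcSize)
 *   {
 *       ZSTD_compressionParameters const base = ZSTD_getCParams(level, srcSize, 0);
 *       // For a few-KB input, this typically lowers windowLog/chainLog/hashLog,
 *       // reducing memory use without hurting ratio on such a small source.
 *       return ZSTD_adjustCParams(base, srcSize, 0);
 *   }
 */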
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize);
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize);
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
{
ZSTD_compressionParameters cParams;
if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
srcSizeHint = CCtxParams->srcSizeHint;
}
cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize);
if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
assert(!ZSTD_checkCParams(cParams));
/* srcSizeHint == 0 means 0 */
return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
}
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
const U32 forCCtx)
{
size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
/* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
* surrounded by redzones in ASAN. */
size_t const tableSpace = chainSize * sizeof(U32)
+ hSize * sizeof(U32)
+ h3Size * sizeof(U32);
size_t const optPotentialSpace =
ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
: 0;
DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
(U32)chainSize, (U32)hSize, (U32)h3Size);
return tableSpace + optSpace;
}
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
const int isStatic,
const size_t buffInSize,
const size_t buffOutSize,
const U64 pledgedSrcSize)
{
size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (cParams->minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm ?
ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ ZSTD_cwksp_alloc_size(buffOutSize);
size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
size_t const neededSpace =
cctxSpace +
entropySpace +
blockStateSpace +
ldmSpace +
ldmSeqSpace +
matchStateSize +
tokenSpace +
bufferSpace;
DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
return neededSpace;
}
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0);
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
/* estimateCCtxSize is for one-shot compression. So no buffers should
* be needed. However, we still allocate two 0-sized buffers, which can
* take space under ASAN. */
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
}
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
}
static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
return ZSTD_estimateCCtxSize_usingCParams(cParams);
}
size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
int level;
size_t memBudget = 0;
for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
return memBudget;
}
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0);
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
ZSTD_CONTENTSIZE_UNKNOWN);
}
}
size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
}
static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
return ZSTD_estimateCStreamSize_usingCParams(cParams);
}
size_t ZSTD_estimateCStreamSize(int compressionLevel)
{
int level;
size_t memBudget = 0;
for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
return memBudget;
}
/* ZSTD_getFrameProgression():
* tells how much data has been consumed (input) and produced (output) for current frame.
* able to count progression inside worker threads (non-blocking mode).
*/
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
if (cctx->appliedParams.nbWorkers > 0) {
return ZSTDMT_getFrameProgression(cctx->mtctx);
}
#endif
{ ZSTD_frameProgression fp;
size_t const buffered = (cctx->inBuff == NULL) ? 0 :
cctx->inBuffPos - cctx->inToCompress;
if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
assert(buffered <= ZSTD_BLOCKSIZE_MAX);
fp.ingested = cctx->consumedSrcSize + buffered;
fp.consumed = cctx->consumedSrcSize;
fp.produced = cctx->producedCSize;
fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
fp.currentJobID = 0;
fp.nbActiveWorkers = 0;
return fp;
} }
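/* Illustrative usage sketch (hypothetical helper, not part of this file) :
 * polling progress from another thread while a multithreaded streaming
 * compression runs. ZSTD_getFrameProgression() is an advanced declaration
 * and requires ZSTD_STATIC_LINKING_ONLY.
 *
 *   #define ZSTD_STATIC_LINKING_ONLY
 *   #include <stdio.h>
 *   #include <zstd.h>
 *
 *   void report_progress(const ZSTD_CCtx* cctx, unsigned long long totalSrcSize)
 *   {
 *       ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
 *       double const pct = totalSrcSize ? 100.0 * (double)fp.consumed / (double)totalSrcSize : 0.0;
 *       printf("ingested %llu B, consumed %llu B (%.1f%%), produced %llu B, %u active workers\n",
 *              fp.ingested, fp.consumed, pct, fp.produced, fp.nbActiveWorkers);
 *   }
 */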
/*! ZSTD_toFlushNow()
* Only useful for multithreading scenarios currently (nbWorkers >= 1).
*/
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
if (cctx->appliedParams.nbWorkers > 0) {
return ZSTDMT_toFlushNow(cctx->mtctx);
}
#endif
(void)cctx;
return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}
static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
ZSTD_compressionParameters cParams2)
{
(void)cParams1;
(void)cParams2;
assert(cParams1.windowLog == cParams2.windowLog);
assert(cParams1.chainLog == cParams2.chainLog);
assert(cParams1.hashLog == cParams2.hashLog);
assert(cParams1.searchLog == cParams2.searchLog);
assert(cParams1.minMatch == cParams2.minMatch);
assert(cParams1.targetLength == cParams2.targetLength);
assert(cParams1.strategy == cParams2.strategy);
}
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
{
int i;
for (i = 0; i < ZSTD_REP_NUM; ++i)
bs->rep[i] = repStartValue[i];
bs->entropy.huf.repeatMode = HUF_repeat_none;
bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}
/*! ZSTD_invalidateMatchState()
* Invalidate all the matches in the match finder tables.
* Requires nextSrc and base to be set (can be NULL).
*/
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
ZSTD_window_clear(&ms->window);
ms->nextToUpdate = ms->window.dictLimit;
ms->loadedDictEnd = 0;
ms->opt.litLengthSum = 0; /* force reset of btopt stats */
ms->dictMatchState = NULL;
}
/**
* Indicates whether this compression proceeds directly from user-provided
* source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
* whether the context needs to buffer the input/output (ZSTDb_buffered).
*/
typedef enum {
ZSTDb_not_buffered,
ZSTDb_buffered
} ZSTD_buffered_policy_e;
/**
* Controls, for this matchState reset, whether the tables need to be cleared /
* prepared for the coming compression (ZSTDcrp_makeClean), or whether the
* tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
* subsequent operation will overwrite the table space anyways (e.g., copying
* the matchState contents in from a CDict).
*/
typedef enum {
ZSTDcrp_makeClean,
ZSTDcrp_leaveDirty
} ZSTD_compResetPolicy_e;
/**
* Controls, for this matchState reset, whether indexing can continue where it
* left off (ZSTDirp_continue), or whether it needs to be restarted from zero
* (ZSTDirp_reset).
*/
typedef enum {
ZSTDirp_continue,
ZSTDirp_reset
} ZSTD_indexResetPolicy_e;
typedef enum {
ZSTD_resetTarget_CDict,
ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
const ZSTD_compResetPolicy_e crp,
const ZSTD_indexResetPolicy_e forceResetIndex,
const ZSTD_resetTarget_e forWho)
{
size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
if (forceResetIndex == ZSTDirp_reset) {
ZSTD_window_init(&ms->window);
ZSTD_cwksp_mark_tables_dirty(ws);
}
ms->hashLog3 = hashLog3;
ZSTD_invalidateMatchState(ms);
assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
ZSTD_cwksp_clear_tables(ws);
DEBUGLOG(5, "reserving table space");
/* table Space */
ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
if (crp!=ZSTDcrp_leaveDirty) {
/* reset tables only */
ZSTD_cwksp_clean_tables(ws);
}
/* opt parser space */
if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
}
ms->cParams = *cParams;
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
return 0;
}
/* ZSTD_indexTooCloseToMax() :
* minor optimization : prefer memset() rather than reduceIndex()
* which is measurably slow in some circumstances (reported for Visual Studio).
* Works when re-using a context for a lot of smallish inputs :
* if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
* memset() will be triggered before reduceIndex().
*/
#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
{
return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}
/*! ZSTD_resetCCtx_internal() :
note : `params` are assumed fully validated at this stage */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_CCtx_params params,
U64 const pledgedSrcSize,
ZSTD_compResetPolicy_e const crp,
ZSTD_buffered_policy_e const zbuff)
{
ZSTD_cwksp* const ws = &zc->workspace;
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
(U32)pledgedSrcSize, params.cParams.windowLog);
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
zc->isFirstBlock = 1;
if (params.ldmParams.enableLdm) {
/* Adjust long distance matching parameters */
ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
assert(params.ldmParams.hashRateLog < 32);
zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
}
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
ZSTD_indexResetPolicy_e needsIndexReset =
(!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
size_t const neededSpace =
ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&params.cParams, &params.ldmParams, zc->staticSize != 0,
buffInSize, buffOutSize, pledgedSrcSize);
FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
/* Check if workspace is large enough, alloc a new one if needed */
{
int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
if (workspaceTooSmall || workspaceWasteful) {
DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
ZSTD_cwksp_sizeof(ws) >> 10,
neededSpace >> 10);
RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
needsIndexReset = ZSTDirp_reset;
ZSTD_cwksp_free(ws, zc->customMem);
FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
DEBUGLOG(5, "reserving object space");
/* Statically sized space.
* entropyWorkspace never moves,
* though prev/next block swap places */
assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
} }
ZSTD_cwksp_clear(ws);
/* init params */
zc->appliedParams = params;
zc->blockState.matchState.cParams = params.cParams;
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
zc->consumedSrcSize = 0;
zc->producedCSize = 0;
if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
zc->appliedParams.fParams.contentSizeFlag = 0;
DEBUGLOG(4, "pledged content size : %u ; flag : %u",
(unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
zc->blockSize = blockSize;
XXH64_reset(&zc->xxhState, 0);
zc->stage = ZSTDcs_init;
zc->dictID = 0;
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
/* ZSTD_wildcopy() is used to copy into the literals buffer,
* so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
*/
zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
zc->seqStore.maxNbLit = blockSize;
/* buffers */
zc->inBuffSize = buffInSize;
zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
zc->outBuffSize = buffOutSize;
zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
/* ldm bucketOffsets table */
if (params.ldmParams.enableLdm) {
/* TODO: avoid memset? */
size_t const ldmBucketSize =
((size_t)1) << (params.ldmParams.hashLog -
params.ldmParams.bucketSizeLog);
zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);
ZSTD_memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);
}
/* sequences storage */
ZSTD_referenceExternalSequences(zc, NULL, 0);
zc->seqStore.maxNbSeq = maxNbSeq;
zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
FORWARD_IF_ERROR(ZSTD_reset_matchState(
&zc->blockState.matchState,
ws,
&params.cParams,
crp,
needsIndexReset,
ZSTD_resetTarget_CCtx), "");
/* ldm hash table */
if (params.ldmParams.enableLdm) {
/* TODO: avoid memset? */
size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
zc->maxNbLdmSequences = maxNbLdmSeq;
ZSTD_window_init(&zc->ldmState.window);
ZSTD_window_clear(&zc->ldmState.window);
zc->ldmState.loadedDictEnd = 0;
}
/* Due to alignment, when reusing a workspace, we can actually consume
* up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
*/
assert(ZSTD_cwksp_used(ws) >= neededSpace &&
ZSTD_cwksp_used(ws) <= neededSpace + 3);
DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
zc->initialized = 1;
return 0;
}
}
/* ZSTD_invalidateRepCodes() :
* ensures next compression will not use repcodes from previous block.
* Note : only works with regular variant;
* do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
int i;
for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
}
/* These are the approximate sizes for each strategy past which copying the
* dictionary tables into the working context is faster than using them
* in-place.
*/
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
8 KB, /* unused */
8 KB, /* ZSTD_fast */
16 KB, /* ZSTD_dfast */
32 KB, /* ZSTD_greedy */
32 KB, /* ZSTD_lazy */
32 KB, /* ZSTD_lazy2 */
32 KB, /* ZSTD_btlazy2 */
32 KB, /* ZSTD_btopt */
8 KB, /* ZSTD_btultra */
8 KB /* ZSTD_btultra2 */
};
static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
U64 pledgedSrcSize)
{
size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
return ( dedicatedDictSearch
|| pledgedSrcSize <= cutoff
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| params->attachDictPref == ZSTD_dictForceAttach )
&& params->attachDictPref != ZSTD_dictForceCopy
&& !params->forceWindow; /* dictMatchState isn't correctly
* handled in _enforceMaxDist */
}
static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
{
ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Resize working context table params for input only, since the dict
* has its own tables. */
/* pledgedSrcSize == 0 means 0! */
if (cdict->matchState.dedicatedDictSearch) {
ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
}
params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, 0);
params.cParams.windowLog = windowLog;
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
ZSTDcrp_makeClean, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
}
{ const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
- cdict->matchState.window.base);
const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
if (cdictLen == 0) {
/* don't even attach dictionaries with no contents */
DEBUGLOG(4, "skipping attaching empty dictionary");
} else {
DEBUGLOG(4, "attaching dictionary into context");
cctx->blockState.matchState.dictMatchState = &cdict->matchState;
/* prep working match state so dict matches never have negative indices
* when they are translated to the working context's index space. */
if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
cctx->blockState.matchState.window.nextSrc =
cctx->blockState.matchState.window.base + cdictEnd;
ZSTD_window_clear(&cctx->blockState.matchState.window);
}
/* loadedDictEnd is expressed within the referential of the active context */
cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
} }
cctx->dictID = cdict->dictID;
/* copy block state */
ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
assert(!cdict->matchState.dedicatedDictSearch);
DEBUGLOG(4, "copying dictionary into context");
{ unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Copy only compression parameters related to tables. */
params.cParams = *cdict_cParams;
params.cParams.windowLog = windowLog;
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
ZSTDcrp_leaveDirty, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
}
ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
/* copy tables */
{ size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
ZSTD_memcpy(cctx->blockState.matchState.hashTable,
cdict->matchState.hashTable,
hSize * sizeof(U32));
ZSTD_memcpy(cctx->blockState.matchState.chainTable,
cdict->matchState.chainTable,
chainSize * sizeof(U32));
}
/* Zero the hashTable3, since the cdict never fills it */
{ int const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
}
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
/* copy dictionary offsets */
{ ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
cctx->dictID = cdict->dictID;
/* copy block state */
ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
/* We have a choice between copying the dictionary context into the working
* context, or referencing the dictionary context from the working context
* in-place. We decide here which strategy to use. */
static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
(unsigned)pledgedSrcSize);
if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
return ZSTD_resetCCtx_byAttachingCDict(
cctx, cdict, *params, pledgedSrcSize, zbuff);
} else {
return ZSTD_resetCCtx_byCopyingCDict(
cctx, cdict, *params, pledgedSrcSize, zbuff);
}
}
/*! ZSTD_copyCCtx_internal() :
* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
* The "context", in this case, refers to the hash and chain tables,
* entropy tables, and dictionary references.
* `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
* @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
const ZSTD_CCtx* srcCCtx,
ZSTD_frameParameters fParams,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(5, "ZSTD_copyCCtx_internal");
RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
"Can't copy a ctx that's not in init stage.");
ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
{ ZSTD_CCtx_params params = dstCCtx->requestedParams;
/* Copy only compression parameters related to tables. */
params.cParams = srcCCtx->appliedParams.cParams;
params.fParams = fParams;
ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
ZSTDcrp_leaveDirty, zbuff);
assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
}
ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
/* copy tables */
{ size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
int const h3log = srcCCtx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
srcCCtx->blockState.matchState.hashTable,
hSize * sizeof(U32));
ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
srcCCtx->blockState.matchState.chainTable,
chainSize * sizeof(U32));
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
srcCCtx->blockState.matchState.hashTable3,
h3Size * sizeof(U32));
}
ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
/* copy dictionary offsets */
{
const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
dstCCtx->dictID = srcCCtx->dictID;
/* copy block state */
ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
return 0;
}
/*! ZSTD_copyCCtx() :
* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
* pledgedSrcSize==0 means "unknown".
* @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
fParams, pledgedSrcSize,
zbuff);
}
#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
* reduce table indexes by `reducerValue`, or squash to zero.
* PreserveMark preserves "unsorted mark" for btlazy2 strategy.
* It must be set to a clear 0/1 value, to remove the branch during inlining.
* The table size is presumed to be a multiple of ZSTD_ROWSIZE
* to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
int const nbRows = (int)size / ZSTD_ROWSIZE;
int cellNb = 0;
int rowNb;
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
assert(size < (1U<<31)); /* can be cast to int */
#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty.
*
* This function however is intended to operate on those dirty tables and
* re-clean them. So when this function is used correctly, we can unpoison
* the memory it operated on. This introduces a blind spot though, since
* if we now try to operate on __actually__ poisoned memory, we will not
* detect that. */
__msan_unpoison(table, size * sizeof(U32));
#endif
for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
int column;
for (column=0; column<ZSTD_ROWSIZE; column++) {
if (preserveMark) {
U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
table[cellNb] += adder;
}
if (table[cellNb] < reducerValue) table[cellNb] = 0;
else table[cellNb] -= reducerValue;
cellNb++;
} }
}
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}
static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
/*! ZSTD_reduceIndex() :
* rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
{ U32 const hSize = (U32)1 << params->cParams.hashLog;
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
}
if (params->cParams.strategy != ZSTD_fast) {
U32 const chainSize = (U32)1 << params->cParams.chainLog;
if (params->cParams.strategy == ZSTD_btlazy2)
ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
else
ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
}
if (ms->hashLog3) {
U32 const h3Size = (U32)1 << ms->hashLog3;
ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
}
}
/*-*******************************************************
* Block entropic compression
*********************************************************/
/* See doc/zstd_compression_format.md for detailed format description */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
const seqDef* const sequences = seqStorePtr->sequencesStart;
BYTE* const llCodeTable = seqStorePtr->llCode;
BYTE* const ofCodeTable = seqStorePtr->ofCode;
BYTE* const mlCodeTable = seqStorePtr->mlCode;
U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
U32 u;
assert(nbSeq <= seqStorePtr->maxNbSeq);
for (u=0; u<nbSeq; u++) {
U32 const llv = sequences[u].litLength;
U32 const mlv = sequences[u].matchLength;
llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
}
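/* At most one sequence per block can have a literal or match length that does
 * not fit in 16 bits. Its position is recorded in longLengthPos; forcing the
 * maximum code here lets the encoder emit the full length through that code's
 * extra bits. */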
if (seqStorePtr->longLengthID==1)
llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
if (seqStorePtr->longLengthID==2)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
/* ZSTD_useTargetCBlockSize():
 * Returns whether the target compressed block size param is being used.
 * If used, compression makes a best effort to keep compressed block sizes around targetCBlockSize.
 * Returns 1 if true, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
{
DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
return (cctxParams->targetCBlockSize != 0);
}
/* ZSTD_compressSequences_internal():
* actually compresses both literals and sequences */
MEM_STATIC size_t
ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
void* entropyWorkspace, size_t entropyWkspSize,
const int bmi2)
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
unsigned* count = (unsigned*)entropyWorkspace;
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
const seqDef* const sequences = seqStorePtr->sequencesStart;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
BYTE* seqHead;
BYTE* lastNCount = NULL;
entropyWorkspace = count + (MaxSeq + 1);
entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
DEBUGLOG(5, "ZSTD_compressSequences_internal (nbSeq=%zu)", nbSeq);
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
size_t const litSize = (size_t)(seqStorePtr->lit - literals);
size_t const cSize = ZSTD_compressLiterals(
&prevEntropy->huf, &nextEntropy->huf,
cctxParams->cParams.strategy,
ZSTD_disableLiteralsCompression(cctxParams),
op, dstCapacity,
literals, litSize,
entropyWorkspace, entropyWkspSize,
bmi2);
FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
assert(cSize <= dstCapacity);
op += cSize;
}
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "Can't fit seq hdr in output buf!");
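/* Number_of_Sequences is written in 1, 2 or 3 bytes :
 * values < 128 fit in a single byte;
 * values < LONGNBSEQ use 2 bytes, with the top bit of the first byte set;
 * larger counts use a 0xFF marker followed by LE16(nbSeq - LONGNBSEQ). */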
if (nbSeq < 128) {
*op++ = (BYTE)nbSeq;
} else if (nbSeq < LONGNBSEQ) {
op[0] = (BYTE)((nbSeq>>8) + 0x80);
op[1] = (BYTE)nbSeq;
op+=2;
} else {
op[0]=0xFF;
MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
op+=3;
}
assert(op <= oend);
if (nbSeq==0) {
/* Copy the old tables over as if we repeated them */
ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
return (size_t)(op - ostart);
}
/* seqHead : flags for FSE encoding type */
seqHead = op++;
assert(op <= oend);
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
/* build CTable for Literal Lengths */
{ unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
count, max, mostFrequent, nbSeq,
LLFSELog, prevEntropy->fse.litlengthCTable,
LL_defaultNorm, LL_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(set_basic < set_compressed && set_rle < set_compressed);
assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
count, max, llCodeTable, nbSeq,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->fse.litlengthCTable,
sizeof(prevEntropy->fse.litlengthCTable),
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
if (LLtype == set_compressed)
lastNCount = op;
op += countSize;
assert(op <= oend);
} }
/* build CTable for Offsets */
{ unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(
count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
count, max, mostFrequent, nbSeq,
OffFSELog, prevEntropy->fse.offcodeCTable,
OF_defaultNorm, OF_defaultNormLog,
defaultPolicy, strategy);
assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
count, max, ofCodeTable, nbSeq,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->fse.offcodeCTable,
sizeof(prevEntropy->fse.offcodeCTable),
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
if (Offtype == set_compressed)
lastNCount = op;
op += countSize;
assert(op <= oend);
} }
/* build CTable for MatchLengths */
{ unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(
count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
count, max, mostFrequent, nbSeq,
MLFSELog, prevEntropy->fse.matchlengthCTable,
ML_defaultNorm, ML_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
count, max, mlCodeTable, nbSeq,
ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->fse.matchlengthCTable,
sizeof(prevEntropy->fse.matchlengthCTable),
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
if (MLtype == set_compressed)
lastNCount = op;
op += countSize;
assert(op <= oend);
} }
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
{ size_t const bitstreamSize = ZSTD_encodeSequences(
op, (size_t)(oend - op),
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq,
longOffsets, bmi2);
FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
op += bitstreamSize;
assert(op <= oend);
/* zstd versions <= 1.3.4 mistakenly report corruption when
* FSE_readNCount() receives a buffer < 4 bytes.
* Fixed by https://github.com/facebook/zstd/pull/1146.
* This can happen when the last set_compressed table present is 2
* bytes and the bitstream is only one byte.
* In this exceedingly rare case, we will simply emit an uncompressed
* block, since it isn't worth optimizing.
*/
if (lastNCount && (op - lastNCount) < 4) {
/* NCountSize >= 2 && bitstreamSize > 0 ==> (op - lastNCount) == 3 */
assert(op - lastNCount == 3);
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
"emitting an uncompressed block.");
return 0;
}
}
DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
return (size_t)(op - ostart);
}
MEM_STATIC size_t
ZSTD_compressSequences(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
size_t srcSize,
void* entropyWorkspace, size_t entropyWkspSize,
int bmi2)
{
size_t const cSize = ZSTD_compressSequences_internal(
seqStorePtr, prevEntropy, nextEntropy, cctxParams,
dst, dstCapacity,
entropyWorkspace, entropyWkspSize, bmi2);
if (cSize == 0) return 0;
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
* Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
*/
if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
return 0; /* block not compressed */
FORWARD_IF_ERROR(cSize, "ZSTD_compressSequences_internal failed");
/* Check compressibility */
{ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
if (cSize >= maxCSize) return 0; /* block not compressed */
}
return cSize;
}
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
{
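/* One row per dictionary mode (in ZSTD_dictMode_e order : no dict, extDict,
 * dictMatchState, dedicatedDictSearch), one column per strategy.
 * Dedicated dict search is only implemented for the greedy/lazy/lazy2 strategies,
 * hence the NULL entries in the last row. */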
static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
ZSTD_compressBlock_doubleFast,
ZSTD_compressBlock_greedy,
ZSTD_compressBlock_lazy,
ZSTD_compressBlock_lazy2,
ZSTD_compressBlock_btlazy2,
ZSTD_compressBlock_btopt,
ZSTD_compressBlock_btultra,
ZSTD_compressBlock_btultra2 },
{ ZSTD_compressBlock_fast_extDict /* default for 0 */,
ZSTD_compressBlock_fast_extDict,
ZSTD_compressBlock_doubleFast_extDict,
ZSTD_compressBlock_greedy_extDict,
ZSTD_compressBlock_lazy_extDict,
ZSTD_compressBlock_lazy2_extDict,
ZSTD_compressBlock_btlazy2_extDict,
ZSTD_compressBlock_btopt_extDict,
ZSTD_compressBlock_btultra_extDict,
ZSTD_compressBlock_btultra_extDict },
{ ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
ZSTD_compressBlock_fast_dictMatchState,
ZSTD_compressBlock_doubleFast_dictMatchState,
ZSTD_compressBlock_greedy_dictMatchState,
ZSTD_compressBlock_lazy_dictMatchState,
ZSTD_compressBlock_lazy2_dictMatchState,
ZSTD_compressBlock_btlazy2_dictMatchState,
ZSTD_compressBlock_btopt_dictMatchState,
ZSTD_compressBlock_btultra_dictMatchState,
ZSTD_compressBlock_btultra_dictMatchState },
{ NULL /* default for 0 */,
NULL,
NULL,
ZSTD_compressBlock_greedy_dedicatedDictSearch,
ZSTD_compressBlock_lazy_dedicatedDictSearch,
ZSTD_compressBlock_lazy2_dedicatedDictSearch,
NULL,
NULL,
NULL,
NULL }
};
ZSTD_blockCompressor selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
assert(selectedCompressor != NULL);
return selectedCompressor;
}
static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
const BYTE* anchor, size_t lastLLSize)
{
ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
ssPtr->longLengthID = 0;
}
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
ZSTD_matchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
}
ZSTD_resetSeqStore(&(zc->seqStore));
/* required for optimal parser to read stats from dictionary */
ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
/* tell the optimal parser how we expect to compress literals */
ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
/* a gap between an attached dict and the current window is not safe,
* they must remain adjacent,
* and when that stops being the case, the dict must be unset */
assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
/* limited update after a very long match */
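/* If the tables lag more than 384 positions behind the current position
 * (typically after a very long match), skip ahead and re-insert at most the
 * last 192 positions, trading a little ratio for speed. */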
{ const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const U32 curr = (U32)(istart-base);
if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
if (curr > ms->nextToUpdate + 384)
ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
}
/* select and store sequences */
{ ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
size_t lastLLSize;
{ int i;
for (i = 0; i < ZSTD_REP_NUM; ++i)
zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
}
if (zc->externSeqStore.pos < zc->externSeqStore.size) {
assert(!zc->appliedParams.ldmParams.enableLdm);
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&zc->externSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm) {
rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
/* Updates ldmSeqStore.size */
FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
&zc->appliedParams.ldmParams,
src, srcSize), "");
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&ldmSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
} else { /* not long range mode */
ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
} }
return ZSTDbss_compress;
}
static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
const seqDef* seqs = seqStore->sequencesStart;
size_t seqsSize = seqStore->sequences - seqs;
ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
size_t i; size_t position; int repIdx;
assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
for (i = 0, position = 0; i < seqsSize; ++i) {
outSeqs[i].offset = seqs[i].offset;
outSeqs[i].litLength = seqs[i].litLength;
outSeqs[i].matchLength = seqs[i].matchLength + MINMATCH;
if (i == seqStore->longLengthPos) {
if (seqStore->longLengthID == 1) {
outSeqs[i].litLength += 0x10000;
} else if (seqStore->longLengthID == 2) {
outSeqs[i].matchLength += 0x10000;
}
}
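/* Offsets <= ZSTD_REP_NUM are repeat codes : resolve them to absolute offsets by
 * looking back at previously reported sequences, or at the standard starting
 * repcodes (repStartValue) at the beginning of the block. Per the format, when
 * litLength == 0 the repcode meaning shifts by one, hence the adjustment below. */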
if (outSeqs[i].offset <= ZSTD_REP_NUM) {
outSeqs[i].rep = outSeqs[i].offset;
repIdx = (unsigned int)i - outSeqs[i].offset;
if (outSeqs[i].litLength == 0) {
if (outSeqs[i].offset < 3) {
--repIdx;
} else {
repIdx = (unsigned int)i - 1;
}
++outSeqs[i].rep;
}
assert(repIdx >= -3);
outSeqs[i].offset = repIdx >= 0 ? outSeqs[repIdx].offset : repStartValue[-repIdx - 1];
if (outSeqs[i].rep == 4) {
--outSeqs[i].offset;
}
} else {
outSeqs[i].offset -= ZSTD_REP_NUM;
}
position += outSeqs[i].litLength;
outSeqs[i].matchPos = (unsigned int)position;
position += outSeqs[i].matchLength;
}
zc->seqCollector.seqIndex += seqsSize;
}
size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize)
{
const size_t dstCapacity = ZSTD_compressBound(srcSize);
void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
SeqCollector seqCollector;
RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
seqCollector.collectSequences = 1;
seqCollector.seqStart = outSeqs;
seqCollector.seqIndex = 0;
seqCollector.maxSequences = outSeqsSize;
zc->seqCollector = seqCollector;
ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
ZSTD_customFree(dst, ZSTD_defaultCMem);
return zc->seqCollector.seqIndex;
}
/* Returns true if the given block is a RLE block */
static int ZSTD_isRLE(const BYTE *ip, size_t length) {
size_t i;
if (length < 2) return 1;
for (i = 1; i < length; ++i) {
if (ip[0] != ip[i]) return 0;
}
return 1;
}
/* Returns true if the given block may be RLE.
* This is just a heuristic based on the compressibility.
* It may return both false positives and false negatives.
*/
static int ZSTD_maybeRLE(seqStore_t const* seqStore)
{
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
return nbSeqs < 4 && nbLits < 10;
}
static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
{
ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
zc->blockState.prevCBlock = zc->blockState.nextCBlock;
zc->blockState.nextCBlock = tmp;
}
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 frame)
{
/* This is an estimated upper bound for the length of an RLE block.
 * It isn't the actual upper bound; finding the real threshold
 * needs further investigation.
 */
const U32 rleMaxLength = 25;
size_t cSize;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
(unsigned)zc->blockState.matchState.nextToUpdate);
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
}
if (zc->seqCollector.collectSequences) {
ZSTD_copyBlockSequences(zc);
return 0;
}
/* encode sequences and literals */
cSize = ZSTD_compressSequences(&zc->seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
dst, dstCapacity,
srcSize,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
zc->bmi2);
if (frame &&
/* We don't want to emit our first block as an RLE block even if it qualifies, because
* doing so would cause the decoder (cli only) to throw a "should consume all input" error.
* This is only an issue for zstd <= v1.4.3.
*/
!zc->isFirstBlock &&
cSize < rleMaxLength &&
ZSTD_isRLE(ip, srcSize))
{
cSize = 1;
op[0] = ip[0];
}
out:
if (!ZSTD_isError(cSize) && cSize > 1) {
ZSTD_confirmRepcodesAndEntropyTables(zc);
}
/* We check that dictionaries have offset codes available for the first
* block. After the first block, the offcode table might not have large
* enough codes to represent the offsets in the data.
*/
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
return cSize;
}
static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const size_t bss, U32 lastBlock)
{
DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
if (bss == ZSTDbss_compress) {
if (/* We don't want to emit our first block as an RLE block even if it qualifies, because
* doing so would cause the decoder (cli only) to throw a "should consume all input" error.
* This is only an issue for zstd <= v1.4.3.
*/
!zc->isFirstBlock &&
ZSTD_maybeRLE(&zc->seqStore) &&
ZSTD_isRLE((BYTE const*)src, srcSize))
{
return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
}
/* Attempt superblock compression.
*
* Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
* standard ZSTD_compressBound(). This is a problem, because even if we have
* space now, taking an extra byte now could cause us to run out of space later
* and violate ZSTD_compressBound().
*
* Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
*
* In order to respect ZSTD_compressBound() we must attempt to emit a raw
* uncompressed block in these cases:
* * cSize == 0: Return code for an uncompressed block.
* * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
* ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
* output space.
* * cSize >= blockBound(srcSize): We have expanded the block too much so
* emit an uncompressed block.
*/
{
size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
if (cSize != ERROR(dstSize_tooSmall)) {
size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
ZSTD_confirmRepcodesAndEntropyTables(zc);
return cSize;
}
}
}
}
DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
/* Superblock compression failed, attempt to emit a single no compress block.
* The decoder will be able to stream this block since it is uncompressed.
*/
return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
}
static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastBlock)
{
size_t cSize = 0;
const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
return cSize;
}
static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
void const* ip,
void const* iend)
{
if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
U32 const maxDist = (U32)1 << params->cParams.windowLog;
U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
ZSTD_cwksp_mark_tables_dirty(ws);
ZSTD_reduceIndex(ms, params, correction);
ZSTD_cwksp_mark_tables_clean(ws);
if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
else ms->nextToUpdate -= correction;
/* invalidate dictionaries on overflow correction */
ms->loadedDictEnd = 0;
ms->dictMatchState = NULL;
}
}
/*! ZSTD_compress_frameChunk() :
* Compress a chunk of data into one or multiple blocks.
* All blocks will be terminated, all input will be consumed.
* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
* The frame is assumed to be already started (header already produced)
* @return : compressed size, or an error code
*/
static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastFrameChunk)
{
size_t blockSize = cctx->blockSize;
size_t remaining = srcSize;
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
XXH64_update(&cctx->xxhState, src, srcSize);
while (remaining) {
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
dstSize_tooSmall,
"not enough space to store compressed block");
if (remaining < blockSize) blockSize = remaining;
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
/* Ensure hash/chain table insertion resumes no sooner than lowlimit */
if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
{ size_t cSize;
if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
assert(cSize > 0);
assert(cSize <= blockSize + ZSTD_blockHeaderSize);
} else {
cSize = ZSTD_compressBlock_internal(cctx,
op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
ip, blockSize, 1 /* frame */);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
if (cSize == 0) { /* block is not compressible */
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
} else {
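/* Block_Header is 3 bytes, little-endian :
 * bit 0 = Last_Block, bits 1-2 = Block_Type, bits 3-23 = Block_Size.
 * For an RLE block the size field carries the regenerated size (blockSize);
 * for a compressed block it carries the compressed size (cSize). */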
U32 const cBlockHeader = cSize == 1 ?
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader);
cSize += ZSTD_blockHeaderSize;
}
}
ip += blockSize;
assert(remaining >= blockSize);
remaining -= blockSize;
op += cSize;
assert(dstCapacity >= cSize);
dstCapacity -= cSize;
cctx->isFirstBlock = 0;
DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
(unsigned)cSize);
} }
if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
return (size_t)(op-ostart);
}
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
{ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params->fParams.checksumFlag>0;
U32 const windowSize = (U32)1 << params->cParams.windowLog;
U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
U32 const fcsCode = params->fParams.contentSizeFlag ?
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
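/* Frame_Header_Descriptor :
 * bits 0-1 = Dictionary_ID_flag, bit 2 = Content_Checksum_flag,
 * bit 5 = Single_Segment_flag, bits 6-7 = Frame_Content_Size_flag
 * (bits 3-4 are reserved/unused). */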
BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
size_t pos=0;
assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
"dst buf is too small to fit worst-case frame header size.");
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
!params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
if (params->format == ZSTD_f_zstd1) {
MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
pos = 4;
}
op[pos++] = frameHeaderDescriptionByte;
if (!singleSegment) op[pos++] = windowLogByte;
switch(dictIDSizeCode)
{
default: assert(0); /* impossible */
case 0 : break;
case 1 : op[pos] = (BYTE)(dictID); pos++; break;
case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
}
switch(fcsCode)
{
default: assert(0); /* impossible */
case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
}
return pos;
}
/* ZSTD_writeLastEmptyBlock() :
* output an empty Block with end-of-frame mark to complete a frame
* @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
* or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
*/
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
"dst buf is too small to write frame trailer empty block.");
{ U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
MEM_writeLE24(dst, cBlockHeader24);
return ZSTD_blockHeaderSize;
}
}
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
"wrong cctx stage");
RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
parameter_unsupported,
"incompatible with ldm");
cctx->externSeqStore.seq = seq;
cctx->externSeqStore.size = nbSeq;
cctx->externSeqStore.capacity = nbSeq;
cctx->externSeqStore.pos = 0;
return 0;
}
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 frame, U32 lastFrameChunk)
{
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
size_t fhSize = 0;
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
cctx->stage, (unsigned)srcSize);
RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
"missing init (ZSTD_compressBegin)");
if (frame && (cctx->stage==ZSTDcs_init)) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
assert(fhSize <= dstCapacity);
dstCapacity -= fhSize;
dst = (char*)dst + fhSize;
cctx->stage = ZSTDcs_ongoing;
}
if (!srcSize) return fhSize; /* do not generate an empty block if no input */
if (!ZSTD_window_update(&ms->window, src, srcSize)) {
ms->nextToUpdate = ms->window.dictLimit;
}
if (cctx->appliedParams.ldmParams.enableLdm) {
ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
}
if (!frame) {
/* overflow check and correction for block mode */
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams,
src, (BYTE const*)src + srcSize);
}
DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
cctx->consumedSrcSize += srcSize;
cctx->producedCSize += (cSize + fhSize);
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
RETURN_ERROR_IF(
cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
srcSize_wrong,
"error : pledgedSrcSize = %u, while realSrcSize >= %u",
(unsigned)cctx->pledgedSrcSizePlusOne-1,
(unsigned)cctx->consumedSrcSize);
}
return cSize + fhSize;
}
}
size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}
size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
{
ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
assert(!ZSTD_checkCParams(cParams));
return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
}
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
{ size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* src, size_t srcSize,
ZSTD_dictTableLoadMethod_e dtlm)
{
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
ZSTD_window_update(&ms->window, src, srcSize);
ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
if (params->ldmParams.enableLdm && ls != NULL) {
ZSTD_window_update(&ls->window, src, srcSize);
ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
}
/* Assert that the ms params match the params we're being given */
ZSTD_assertEqualCParams(params->cParams, ms->cParams);
if (srcSize <= HASH_READ_SIZE) return 0;
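/* Load the dictionary in chunks of at most ZSTD_CHUNKSIZE_MAX, running overflow
 * correction between chunks so that indices stay valid even for very large
 * dictionaries. */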
while (iend - ip > HASH_READ_SIZE) {
size_t const remaining = (size_t)(iend - ip);
size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
const BYTE* const ichunk = ip + chunk;
ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
if (params->ldmParams.enableLdm && ls != NULL)
ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
switch(params->cParams.strategy)
{
case ZSTD_fast:
ZSTD_fillHashTable(ms, ichunk, dtlm);
break;
case ZSTD_dfast:
ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
assert(chunk == remaining); /* must load everything in one go */
ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
} else if (chunk >= HASH_READ_SIZE) {
ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
}
break;
case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
if (chunk >= HASH_READ_SIZE)
ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
break;
default:
assert(0); /* not possible : not a valid strategy id */
}
ip = ichunk;
}
ms->nextToUpdate = (U32)(iend - ms->window.base);
return 0;
}
/* Dictionaries that assign zero probability to symbols that actually occur cause problems
 * when FSE encoding. Mark dictionaries containing zero-probability symbols as FSE_repeat_check :
 * only dictionaries with 100% valid symbols can be assumed valid.
 */
static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
U32 s;
if (dictMaxSymbolValue < maxSymbolValue) {
return FSE_repeat_check;
}
for (s = 0; s <= maxSymbolValue; ++s) {
if (normalizedCounter[s] == 0) {
return FSE_repeat_check;
}
}
return FSE_repeat_valid;
}
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
const void* const dict, size_t dictSize)
{
short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff;
const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */
const BYTE* const dictEnd = dictPtr + dictSize;
dictPtr += 8;
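/* The entropy section is laid out as : Huffman table for literals, then FSE
 * tables for offsets, match lengths and literal lengths, then the 3 starting
 * repcodes (12 bytes). */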
bs->entropy.huf.repeatMode = HUF_repeat_check;
{ unsigned maxSymbolValue = 255;
unsigned hasZeroWeights = 1;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
dictEnd-dictPtr, &hasZeroWeights);
/* We only set the loaded table as valid if it contains all non-zero
* weights. Otherwise, we set it to check */
if (!hasZeroWeights)
bs->entropy.huf.repeatMode = HUF_repeat_valid;
RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
dictPtr += hufHeaderSize;
}
{ unsigned offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
/* fill all offset symbols to avoid garbage at end of table */
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.offcodeCTable,
offcodeNCount, MaxOff, offcodeLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
dictPtr += offcodeHeaderSize;
}
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.matchlengthCTable,
matchlengthNCount, matchlengthMaxValue, matchlengthLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
dictPtr += matchlengthHeaderSize;
}
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.litlengthCTable,
litlengthNCount, litlengthMaxValue, litlengthLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
dictPtr += litlengthHeaderSize;
}
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
bs->rep[0] = MEM_readLE32(dictPtr+0);
bs->rep[1] = MEM_readLE32(dictPtr+4);
bs->rep[2] = MEM_readLE32(dictPtr+8);
dictPtr += 12;
{ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
U32 offcodeMax = MaxOff;
if (dictContentSize <= ((U32)-1) - 128 KB) {
U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
}
/* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
/* All repCodes must be <= dictContentSize and != 0 */
{ U32 u;
for (u=0; u<3; u++) {
RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
} } }
return dictPtr - (const BYTE*)dict;
}
/* Dictionary format :
* See :
* https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
*/
/*! ZSTD_loadZstdDictionary() :
 * @return : dictID, or an error code
 * assumptions : the magic number has already been checked,
 *               and dictSize >= 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* dict, size_t dictSize,
ZSTD_dictTableLoadMethod_e dtlm,
void* workspace)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
size_t dictID;
size_t eSize;
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(dictSize >= 8);
assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
dictPtr += eSize;
{
size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
}
return dictID;
}
/** ZSTD_compress_insertDictionary() :
* @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_matchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
const ZSTD_CCtx_params* params,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
void* workspace)
{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
if ((dict==NULL) || (dictSize<8)) {
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
return 0;
}
ZSTD_reset_compressedBlockState(bs);
/* dict restricted modes */
if (dictContentType == ZSTD_dct_rawContent)
return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
if (dictContentType == ZSTD_dct_auto) {
DEBUGLOG(4, "raw content dictionary detected");
return ZSTD_loadDictionaryContent(
ms, ls, ws, params, dict, dictSize, dtlm);
}
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
assert(0); /* impossible */
}
/* dict as full zstd dictionary */
return ZSTD_loadZstdDictionary(
bs, ms, ws, params, dict, dictSize, dtlm, workspace);
}
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
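/* Heuristic : reuse the CDict's own compression parameters when the source is
 * small (< 128 KB) or not much larger than the dictionary (< 6x its content
 * size), i.e. whenever the dictionary is expected to dominate compression. */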
/*! ZSTD_compressBegin_internal() :
* @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if ( (cdict)
&& (cdict->dictContentSize > 0)
&& ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
|| pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| cdict->compressionLevel == 0)
&& (params->attachDictPref != ZSTD_dictForceLoad) ) {
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
}
FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
ZSTDcrp_makeClean, zbuff) , "");
{ size_t const dictID = cdict ?
ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
cdict->dictContentSize, dictContentType, dtlm,
cctx->entropyWorkspace)
: ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
dictContentType, dtlm, cctx->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
}
return 0;
}
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
/* compression parameters verification and optimization */
FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
return ZSTD_compressBegin_internal(cctx,
dict, dictSize, dictContentType, dtlm,
cdict,
params, pledgedSrcSize,
ZSTDb_not_buffered);
}
/*! ZSTD_compressBegin_advanced() :
* @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
return ZSTD_compressBegin_advanced_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
NULL /*cdict*/,
&cctxParams, pledgedSrcSize);
}
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
}
/*! ZSTD_writeEpilogue() :
* Ends a frame.
* @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
size_t fhSize = 0;
DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
dstCapacity -= fhSize;
op += fhSize;
cctx->stage = ZSTDcs_ongoing;
}
if (cctx->stage != ZSTDcs_ending) {
/* write one last empty block, make it the "last" block */
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
MEM_writeLE32(op, cBlockHeader24);
op += ZSTD_blockHeaderSize;
dstCapacity -= ZSTD_blockHeaderSize;
}
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
MEM_writeLE32(op, checksum);
op += 4;
}
cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
return op-ostart;
}
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
size_t endResult;
size_t const cSize = ZSTD_compressContinue_internal(cctx,
dst, dstCapacity, src, srcSize,
1 /* frame mode */, 1 /* last chunk */);
FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
DEBUGLOG(4, "end of frame : controlling src size");
RETURN_ERROR_IF(
cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
srcSize_wrong,
"error : pledgedSrcSize = %u, while realSrcSize = %u",
(unsigned)cctx->pledgedSrcSizePlusOne-1,
(unsigned)cctx->consumedSrcSize);
}
return cSize + endResult;
}
static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
const ZSTD_parameters* params)
{
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
DEBUGLOG(4, "ZSTD_compress_internal");
return ZSTD_compress_advanced_internal(cctx,
dst, dstCapacity,
src, srcSize,
dict, dictSize,
&cctxParams);
}
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_parameters params)
{
DEBUGLOG(4, "ZSTD_compress_advanced");
FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
return ZSTD_compress_internal(cctx,
dst, dstCapacity,
src, srcSize,
dict, dictSize,
&params);
}
/* Internal */
size_t ZSTD_compress_advanced_internal(
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
const ZSTD_CCtx_params* params)
{
DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) , "");
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize,
int compressionLevel)
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0);
ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, &params);
DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
assert(params.fParams.contentSizeFlag == 1);
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
}
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
{
DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
assert(cctx != NULL);
return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}
size_t ZSTD_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
{
size_t result;
ZSTD_CCtx ctxBody;
ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */
return result;
}
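/* Illustrative usage sketch (not part of the library) : the simple one-shot entry point.
 * `src`/`srcSize` are assumed to be caller-provided; the destination buffer is sized with
 * ZSTD_compressBound() so that compression cannot fail for lack of space.
 *
 *     size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *     void*  const dst = malloc(dstCapacity);
 *     size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
 *     if (ZSTD_isError(cSize)) {
 *         // handle error, e.g. report ZSTD_getErrorName(cSize)
 *     }
 *     // ... use the cSize compressed bytes in dst, then free(dst)
 */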
/* ===== Dictionary API ===== */
/*! ZSTD_estimateCDictSize_advanced() :
 *  Estimate the amount of memory that will be needed to create a dictionary with the following arguments */
size_t ZSTD_estimateCDictSize_advanced(
size_t dictSize, ZSTD_compressionParameters cParams,
ZSTD_dictLoadMethod_e dictLoadMethod)
{
DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}
size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support sizeof on NULL */
DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
/* cdict may be in the workspace */
return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
+ ZSTD_cwksp_sizeof(&cdict->workspace);
}
static size_t ZSTD_initCDict_internal(
ZSTD_CDict* cdict,
const void* dictBuffer, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams,
ZSTD_CCtx_params params)
{
DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
assert(!ZSTD_checkCParams(cParams));
cdict->matchState.cParams = cParams;
cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
cdict->matchState.dedicatedDictSearch = 0;
}
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
cdict->dictContent = dictBuffer;
} else {
void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
cdict->dictContent = internalBuffer;
ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
}
cdict->dictContentSize = dictSize;
cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
/* Reset the state to no dictionary */
ZSTD_reset_compressedBlockState(&cdict->cBlockState);
FORWARD_IF_ERROR(ZSTD_reset_matchState(
&cdict->matchState,
&cdict->workspace,
&cParams,
ZSTDcrp_makeClean,
ZSTDirp_reset,
ZSTD_resetTarget_CDict), "");
/* (Maybe) load the dictionary
* Skips loading the dictionary if it is < 8 bytes.
*/
{ params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
params.fParams.contentSizeFlag = 1;
params.cParams = cParams;
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
&params, cdict->dictContent, cdict->dictContentSize,
dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
}
}
return 0;
}
static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
(dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
ZSTD_cwksp ws;
ZSTD_CDict* cdict;
        if (!workspace) return NULL;   /* allocation failure */
ZSTD_cwksp_init(&ws, workspace, workspaceSize);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
assert(cdict != NULL);
ZSTD_cwksp_move(&cdict->workspace, &ws);
cdict->customMem = customMem;
cdict->compressionLevel = 0; /* signals advanced API usage */
return cdict;
}
}
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
    {   ZSTD_CDict* cdict = ZSTD_createCDict_advanced_internal(dictSize,
                                                                dictLoadMethod, cParams,
                                                                customMem);
        ZSTD_CCtx_params params;
        if (cdict == NULL) return NULL;   /* allocation failure */
        ZSTD_memset(&params, 0, sizeof(params));
        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dictBuffer, dictSize,
dictLoadMethod, dictContentType,
cParams, params) )) {
ZSTD_freeCDict(cdict);
return NULL;
}
return cdict;
}
}
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_CCtx_params* cctxParams,
ZSTD_customMem customMem)
{
int const dedicatedDictSearch = cctxParams->enableDedicatedDictSearch &&
ZSTD_dedicatedDictSearch_isSupported(
cctxParams->compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
if (!dedicatedDictSearch) {
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(
cctxParams->compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
return ZSTD_createCDict_advanced(dict, dictSize,
dictLoadMethod, dictContentType, cParams,
customMem);
}
{ ZSTD_compressionParameters const cParams = ZSTD_dedicatedDictSearch_getCParams(
cctxParams->compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
        ZSTD_CDict* cdict = ZSTD_createCDict_advanced_internal(dictSize,
                                                                dictLoadMethod, cParams,
                                                                customMem);
        if (cdict == NULL) return NULL;   /* allocation failure */
        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
dictLoadMethod, dictContentType,
cParams, *cctxParams) )) {
ZSTD_freeCDict(cdict);
return NULL;
}
return cdict;
}
}
ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byCopy, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
if (cdict)
cdict->compressionLevel = compressionLevel == 0 ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
return cdict;
}
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
return ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byRef, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
}
size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support free on NULL */
{ ZSTD_customMem const cMem = cdict->customMem;
int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
ZSTD_cwksp_free(&cdict->workspace, cMem);
if (!cdictInWorkspace) {
ZSTD_customFree(cdict, cMem);
}
return 0;
}
}
/*! ZSTD_initStaticCDict() :
 *  Generate a digested dictionary in the provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             The provided pointer must be 8-bytes aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large the workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevant cParams.
 * @return : pointer to ZSTD_CDict, or NULL if error (workspace too small or misaligned)
 *  Note : there is no corresponding "free" function.
 *         Since the workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ matchStateSize;
ZSTD_CDict* cdict;
ZSTD_CCtx_params params;
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
{
ZSTD_cwksp ws;
ZSTD_cwksp_init(&ws, workspace, workspaceSize);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
if (cdict == NULL) return NULL;
ZSTD_cwksp_move(&cdict->workspace, &ws);
}
DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
(unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
ZSTD_memset(&params, 0, sizeof(params));
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
dictLoadMethod, dictContentType,
cParams, params) ))
return NULL;
return cdict;
}
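/* Illustrative usage sketch (not part of the library) : creating a CDict inside caller-managed
 * memory. The workspace size comes from ZSTD_estimateCDictSize(), which mirrors the neededSize
 * computation above. `dictBuffer`/`dictSize` are assumed to be a valid dictionary.
 *
 *     int const level = 3;
 *     size_t const wkspSize = ZSTD_estimateCDictSize(dictSize, level);
 *     void*  const wksp = malloc(wkspSize);             // malloc() guarantees 8-byte alignment
 *     const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(
 *             wksp, wkspSize,
 *             dictBuffer, dictSize,
 *             ZSTD_dlm_byCopy, ZSTD_dct_auto,
 *             ZSTD_getCParams(level, ZSTD_CONTENTSIZE_UNKNOWN, dictSize));
 *     // cdict == NULL means the workspace was too small or misaligned.
 *     // There is no free function : simply release wksp once the dictionary is no longer used.
 */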
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
assert(cdict != NULL);
return cdict->matchState.cParams;
}
/* ZSTD_compressBegin_usingCDict_advanced() :
* cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
{ ZSTD_CCtx_params params = cctx->requestedParams;
params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
|| pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| cdict->compressionLevel == 0 )
&& (params.attachDictPref != ZSTD_dictForceLoad) ?
ZSTD_getCParamsFromCDict(cdict)
: ZSTD_getCParams(cdict->compressionLevel,
pledgedSrcSize,
cdict->dictContentSize);
/* Increase window log to fit the entire dictionary and source if the
* source size is known. Limit the increase to 19, which is the
* window log for compression level 1 with the largest source size.
*/
if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
}
params.fParams = fParams;
return ZSTD_compressBegin_internal(cctx,
NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
cdict,
&params, pledgedSrcSize,
ZSTDb_not_buffered);
}
}
/* ZSTD_compressBegin_usingCDict() :
 * cdict must be != NULL.
 * Uses default frame parameters : content size not written, no checksum, dictID enabled. */
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
/*! ZSTD_compress_usingCDict() :
 *  Compression using a digested Dictionary.
 *  Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
 *  Note that compression parameters are decided at CDict creation time
 *  while frame parameters are hardcoded */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
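/* Illustrative usage sketch (not part of the library) : compressing many small inputs with a
 * shared digested dictionary. `dictBuffer`/`dictSize` and the src/dst buffers are assumptions
 * of this sketch; error checks are elided.
 *
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
 *     ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
 *     size_t const cSize = ZSTD_compress_usingCDict(cctx,
 *                                 dst, ZSTD_compressBound(srcSize),
 *                                 src, srcSize, cdict);
 *     ZSTD_freeCCtx(cctx);
 *     ZSTD_freeCDict(cdict);   // keep the CDict alive as long as further compressions need it
 */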
/* ******************************************************************
* Streaming
********************************************************************/
ZSTD_CStream* ZSTD_createCStream(void)
{
DEBUGLOG(3, "ZSTD_createCStream");
return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
}
ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
{
return ZSTD_initStaticCCtx(workspace, workspaceSize);
}
ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{ /* CStream and CCtx are now same object */
return ZSTD_createCCtx_advanced(customMem);
}
size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
{
return ZSTD_freeCCtx(zcs); /* same object */
}
/*====== Initialization ======*/
size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
size_t ZSTD_CStreamOutSize(void)
{
    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bit checksum */ ;
}
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
const ZSTD_CDict* const cdict,
ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_resetCStream_internal");
/* Finalize the compression parameters */
params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, dictContentType, ZSTD_dtlm_fast,
cdict,
&params, pledgedSrcSize,
ZSTDb_buffered) , "");
cctx->inToCompress = 0;
cctx->inBuffPos = 0;
    cctx->inBuffTarget = cctx->blockSize
                       + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require adding a 3-byte empty block to end the frame */
cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
cctx->streamStage = zcss_load;
cctx->frameEnded = 0;
return 0; /* ready to go */
}
/* ZSTD_resetCStream():
* pledgedSrcSize == 0 means "unknown" */
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period.
     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
     * 0 will be interpreted as "empty" in the future.
     */
U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
return 0;
}
/*! ZSTD_initCStream_internal() :
* Note : for lib/compress only. Used by zstdmt_compress.c.
* Assumption 1 : params are valid
* Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_internal");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zcs->requestedParams = *params;
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (dict) {
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
} else {
/* Dictionary is cleared if !cdict */
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
}
return 0;
}
/* ZSTD_initCStream_usingCDict_advanced() :
* same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
zcs->requestedParams.fParams = fParams;
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
return 0;
}
/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
return 0;
}
/* ZSTD_initCStream_advanced() :
* pledgedSrcSize must be exact.
* if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
* dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pss)
{
/* for compatibility with older programs relying on this behavior.
* Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
* This line will be removed in the future.
*/
U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
DEBUGLOG(4, "ZSTD_initCStream_advanced");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, &params);
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
return 0;
}
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
DEBUGLOG(4, "ZSTD_initCStream_usingDict");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
return 0;
}
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period.
     * Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
     * 0 will be interpreted as "empty" in the future.
     */
U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
DEBUGLOG(4, "ZSTD_initCStream_srcSize");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
return 0;
}
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
DEBUGLOG(4, "ZSTD_initCStream");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
return 0;
}
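/* Illustrative usage sketch (not part of the library) : the classic streaming loop.
 * ZSTD_CStreamInSize() / ZSTD_CStreamOutSize() give recommended buffer sizes;
 * readInput() and writeOutput() are hypothetical caller callbacks, not zstd functions.
 *
 *     ZSTD_CStream* const zcs = ZSTD_createCStream();
 *     ZSTD_initCStream(zcs, 3);                             // check the return code in real code
 *     {   ZSTD_inBuffer  in  = { inBuff,  0, 0 };
 *         ZSTD_outBuffer out = { outBuff, ZSTD_CStreamOutSize(), 0 };
 *         while ((in.size = readInput(inBuff, ZSTD_CStreamInSize())) != 0) {
 *             in.pos = 0;
 *             while (in.pos < in.size) {
 *                 out.pos = 0;
 *                 ZSTD_compressStream(zcs, &out, &in);      // check the return code in real code
 *                 writeOutput(outBuff, out.pos);
 *             }
 *         }
 *     }
 *     // then drain the epilogue with ZSTD_endStream() until it returns 0 (see Finalize below)
 *     ZSTD_freeCStream(zcs);
 */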
/*====== Compression ======*/
static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
if (hintInSize==0) hintInSize = cctx->blockSize;
return hintInSize;
}
/** ZSTD_compressStream_generic():
 *  internal function for all *compressStream*() variants
 * @return : hint size for next input */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode)
{
const char* const istart = (const char*)input->src;
const char* const iend = input->size != 0 ? istart + input->size : istart;
const char* ip = input->pos != 0 ? istart + input->pos : istart;
char* const ostart = (char*)output->dst;
char* const oend = output->size != 0 ? ostart + output->size : ostart;
char* op = output->pos != 0 ? ostart + output->pos : ostart;
U32 someMoreWork = 1;
/* check expectations */
DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
assert(zcs->inBuff != NULL);
assert(zcs->inBuffSize > 0);
assert(zcs->outBuff != NULL);
assert(zcs->outBuffSize > 0);
assert(output->pos <= output->size);
assert(input->pos <= input->size);
while (someMoreWork) {
switch(zcs->streamStage)
{
case zcss_init:
RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
case zcss_load:
if ( (flushMode == ZSTD_e_end)
&& ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */
&& (zcs->inBuffPos == 0) ) {
/* shortcut to compression pass directly into output buffer */
size_t const cSize = ZSTD_compressEnd(zcs,
op, oend-op, ip, iend-ip);
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
ip = iend;
op += cSize;
zcs->frameEnded = 1;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
someMoreWork = 0; break;
}
/* complete loading into inBuffer */
{ size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
size_t const loaded = ZSTD_limitCopy(
zcs->inBuff + zcs->inBuffPos, toLoad,
ip, iend-ip);
zcs->inBuffPos += loaded;
if (loaded != 0)
ip += loaded;
if ( (flushMode == ZSTD_e_continue)
&& (zcs->inBuffPos < zcs->inBuffTarget) ) {
/* not enough input to fill full block : stop here */
someMoreWork = 0; break;
}
if ( (flushMode == ZSTD_e_flush)
&& (zcs->inBuffPos == zcs->inToCompress) ) {
/* empty */
someMoreWork = 0; break;
}
}
/* compress current block (note : this stage cannot be stopped in the middle) */
DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
{ void* cDst;
size_t cSize;
size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
size_t oSize = oend-op;
unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
if (oSize >= ZSTD_compressBound(iSize))
cDst = op; /* compress into output buffer, to skip flush stage */
else
cDst = zcs->outBuff, oSize = zcs->outBuffSize;
cSize = lastBlock ?
ZSTD_compressEnd(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize) :
ZSTD_compressContinue(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize);
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
/* prepare next block */
zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
if (zcs->inBuffTarget > zcs->inBuffSize)
zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
(unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
if (!lastBlock)
assert(zcs->inBuffTarget <= zcs->inBuffSize);
zcs->inToCompress = zcs->inBuffPos;
if (cDst == op) { /* no need to flush */
op += cSize;
if (zcs->frameEnded) {
DEBUGLOG(5, "Frame completed directly in outBuffer");
someMoreWork = 0;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
}
break;
}
zcs->outBuffContentSize = cSize;
zcs->outBuffFlushedSize = 0;
zcs->streamStage = zcss_flush; /* pass-through to flush stage */
}
/* fall-through */
case zcss_flush:
DEBUGLOG(5, "flush stage");
{ size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
(unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
if (flushed)
op += flushed;
zcs->outBuffFlushedSize += flushed;
if (toFlush!=flushed) {
/* flush not fully completed, presumably because dst is too small */
assert(op==oend);
someMoreWork = 0;
break;
}
zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
if (zcs->frameEnded) {
DEBUGLOG(5, "Frame completed on flush");
someMoreWork = 0;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
break;
}
zcs->streamStage = zcss_load;
break;
}
default: /* impossible */
assert(0);
}
}
input->pos = ip - istart;
output->pos = op - ostart;
if (zcs->frameEnded) return 0;
return ZSTD_nextInputSizeHint(zcs);
}
static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
if (cctx->appliedParams.nbWorkers >= 1) {
assert(cctx->mtctx != NULL);
return ZSTDMT_nextInputSizeHint(cctx->mtctx);
}
#endif
return ZSTD_nextInputSizeHint(cctx);
}
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
return ZSTD_nextInputSizeHint_MTorST(zcs);
}
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp)
{
DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
/* check conditions */
RETURN_ERROR_IF(output->pos > output->size, GENERIC, "invalid buffer");
RETURN_ERROR_IF(input->pos > input->size, GENERIC, "invalid buffer");
assert(cctx!=NULL);
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
ZSTD_CCtx_params params = cctx->requestedParams;
ZSTD_prefixDict const prefixDict = cctx->prefixDict;
FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */
params.cParams = ZSTD_getCParamsFromCCtxParams(
&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
#ifdef ZSTD_MULTITHREAD
if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
}
if (params.nbWorkers > 0) {
/* mt context creation */
if (cctx->mtctx == NULL) {
DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
params.nbWorkers);
cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem);
RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!");
}
/* mt compression */
DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
cctx->mtctx,
prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , "");
cctx->streamStage = zcss_load;
cctx->appliedParams.nbWorkers = params.nbWorkers;
} else
#endif
{ FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
cctx->cdict,
params, cctx->pledgedSrcSizePlusOne-1) , "");
assert(cctx->streamStage == zcss_load);
assert(cctx->appliedParams.nbWorkers == 0);
} }
/* end of transparent initialization stage */
/* compression stage */
#ifdef ZSTD_MULTITHREAD
if (cctx->appliedParams.nbWorkers > 0) {
int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
size_t flushMin;
assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
if (cctx->cParamsChanged) {
ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
cctx->cParamsChanged = 0;
}
do {
flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
if ( ZSTD_isError(flushMin)
|| (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
}
FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed");
} while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
/* Either we don't require maximum forward progress, we've finished the
* flush, or we are out of output space.
*/
assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
return flushMin;
}
#endif
FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
DEBUGLOG(5, "completed ZSTD_compressStream2");
return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
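/* Illustrative usage sketch (not part of the library) : single-pass compression through
 * ZSTD_compressStream2() with ZSTD_e_end, which is also the path taken by ZSTD_compress2()
 * below. `src`, `srcSize`, `dst` and `dstCapacity` are assumed to be caller-provided.
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
 *     {   ZSTD_inBuffer  in  = { src, srcSize, 0 };
 *         ZSTD_outBuffer out = { dst, dstCapacity, 0 };
 *         size_t remaining;
 *         do {
 *             remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
 *         } while (remaining != 0 && !ZSTD_isError(remaining) && out.pos < out.size);
 *         // remaining == 0 : the frame is complete and occupies out.pos bytes of dst
 *     }
 *     ZSTD_freeCCtx(cctx);
 */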
size_t ZSTD_compressStream2_simpleArgs (
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp)
{
ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
ZSTD_inBuffer input = { src, srcSize, *srcPos };
/* ZSTD_compressStream2() will check validity of dstPos and srcPos */
size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
*dstPos = output.pos;
*srcPos = input.pos;
return cErr;
}
size_t ZSTD_compress2(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
{ size_t oPos = 0;
size_t iPos = 0;
size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
dst, dstCapacity, &oPos,
src, srcSize, &iPos,
ZSTD_e_end);
FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
if (result != 0) { /* compression not completed, due to lack of output space */
assert(oPos == dstCapacity);
RETURN_ERROR(dstSize_tooSmall, "");
}
        assert(iPos == srcSize);   /* all input is expected to be consumed */
return oPos;
}
}
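/* Illustrative usage sketch (not part of the library) : ZSTD_compress2() honors parameters
 * previously set on the context through ZSTD_CCtx_setParameter(). The values below are
 * arbitrary examples; buffers are assumed to be caller-provided.
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
 *     size_t const cSize = ZSTD_compress2(cctx, dst, ZSTD_compressBound(srcSize), src, srcSize);
 *     ZSTD_freeCCtx(cctx);
 */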
/*====== Finalize ======*/
/*! ZSTD_flushStream() :
* @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = { NULL, 0, 0 };
return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = { NULL, 0, 0 };
size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
/* single thread mode : attempt to calculate remaining to flush more precisely */
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
return toFlush;
}
}
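/* Illustrative usage sketch (not part of the library) : draining the end of a stream.
 * ZSTD_endStream() returns the number of bytes still to flush, so it is called until it
 * reports 0. writeOutput() is a hypothetical caller callback, not a zstd function.
 *
 *     ZSTD_outBuffer out = { outBuff, ZSTD_CStreamOutSize(), 0 };
 *     size_t remaining;
 *     do {
 *         out.pos = 0;
 *         remaining = ZSTD_endStream(zcs, &out);       // check ZSTD_isError() in real code
 *         writeOutput(outBuff, out.pos);
 *     } while (remaining != 0);
 */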
/*-===== Pre-defined compression levels =====-*/
#define ZSTD_MAX_CLEVEL 22
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{ /* "default" - for any srcSize > 256 KB */
/* W, C, H, S, L, TL, strat */
{ 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
{ 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
{ 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
{ 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
{ 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
{ 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */
{ 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */
{ 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */
{ 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
{ 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
{ 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
{ 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
{ 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
{ 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */
{ 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
{ 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
{ 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
{ 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
{ 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
{ 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
{ 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
{ 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
{ 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
},
{ /* for srcSize <= 256 KB */
/* W, C, H, S, L, T, strat */
{ 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
{ 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
{ 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
{ 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
{ 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/
{ 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/
{ 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
{ 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
{ 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
{ 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
{ 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
{ 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
{ 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
{ 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
{ 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
{ 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
{ 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
{ 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
{ 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
{ 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
{ 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
{ 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
{ 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
},
{ /* for srcSize <= 128 KB */
/* W, C, H, S, L, T, strat */
{ 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
{ 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
{ 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
{ 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
{ 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
{ 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
{ 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
{ 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
{ 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
{ 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
{ 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
{ 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
{ 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
{ 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
{ 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
{ 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
{ 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
{ 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
{ 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
{ 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
{ 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
{ 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
{ 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
},
{ /* for srcSize <= 16 KB */
/* W, C, H, S, L, T, strat */
{ 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
{ 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
{ 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
{ 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
{ 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
{ 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
{ 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
{ 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
{ 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
{ 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
{ 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
{ 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
{ 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
{ 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
{ 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
{ 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
{ 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
{ 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
{ 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
{ 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
{ 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
{ 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
{ 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
},
};
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, unsigned long long srcSizeHint, size_t const dictSize)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize);
switch (cParams.strategy) {
case ZSTD_fast:
case ZSTD_dfast:
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
break;
case ZSTD_btlazy2:
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
break;
}
return cParams;
}
static int ZSTD_dedicatedDictSearch_isSupported(int const compressionLevel, unsigned long long srcSizeHint, size_t const dictSize)
{
ZSTD_compressionParameters const cParams = ZSTD_dedicatedDictSearch_getCParams(compressionLevel, srcSizeHint, dictSize);
return (cParams.strategy >= ZSTD_greedy) && (cParams.strategy <= ZSTD_lazy2);
}
/**
 * Reverses the adjustment applied to cparams when enabling dedicated dict
 * search. This is used to recover the cParams that should be used in the
 * working context. (Otherwise, the working context's tables would also grow.)
 */
static void ZSTD_dedicatedDictSearch_revertCParams(
ZSTD_compressionParameters* cParams) {
switch (cParams->strategy) {
case ZSTD_fast:
case ZSTD_dfast:
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
break;
case ZSTD_btlazy2:
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
break;
}
}
/*! ZSTD_getCParams_internal() :
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
* Use dictSize == 0 for unknown or unused. */
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
U64 const rSize = unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
int row = compressionLevel;
DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
{ ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */
/* refine parameters based on srcSize & dictSize */
return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);
}
}
/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize);
}
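/* Illustrative usage sketch (not part of the library) : ZSTD_getCParams() turns a compression
 * level plus optional size hints into explicit cParams, which can then feed the advanced
 * dictionary constructors above. `dictBuffer`/`dictSize` are assumptions of this sketch.
 *
 *     ZSTD_compressionParameters const cParams =
 *             ZSTD_getCParams(5, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
 *     ZSTD_customMem const noCustomMem = { NULL, NULL, NULL };   // use default allocators
 *     ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize,
 *                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
 *                                     cParams, noCustomMem);
 */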
/*! ZSTD_getParams_internal() :
 * same idea as ZSTD_getCParams_internal()
 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
 *  Fields of `ZSTD_frameParameters` are set to default values.
 *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. */
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize);
DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
ZSTD_memset(&params, 0, sizeof(params));
params.cParams = cParams;
params.fParams.contentSizeFlag = 1;
return params;
}
/*! ZSTD_getParams() :
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize);
}