fix confusion between unsigned <-> U32

as suggested in #1441.

generally U32 and unsigned are the same thing,
except when they are not ...

case : 32-bit compilation for MIPS (uint32_t == unsigned long)
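To make the problem concrete, here is a minimal sketch (illustration only, not code from this patch), assuming a 32-bit target where <stdint.h> defines uint32_t as unsigned long :

    #include <stdint.h>

    typedef uint32_t U32;    /* on the MIPS case above : effectively unsigned long */

    void sketch(void)
    {
        U32 v = 0;
        unsigned* p = &v;    /* fine where uint32_t == unsigned ;
                              * "incompatible pointer types" where uint32_t == unsigned long,
                              * even though both types are 32 bits wide */
        (void)p;
    }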

The vast majority of the changes consists in transforming U32 into unsigned.
In rare cases, it's the other way around (typically for internal code, such as seeds).

Among the issues this patch solves :
- some parameters were declared with type `unsigned` in *.h,
  but with type `U32` in their implementation *.c .
- some parameters have type `unsigned*`,
  but the caller passed a pointer to `U32` instead (both patterns are illustrated below).
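A hypothetical sketch of both patterns ; the function and variable names below are invented for illustration, and U32 is assumed to be the uint32_t typedef from mem.h :

    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t U32;                            /* assumption : mirrors mem.h */

    /* pattern 1 : the header declares unsigned, the .c file defines U32.
     * Identical where U32 == unsigned, but a conflicting declaration
     * (diagnosed by the compiler) where it is not. */
    size_t FOO_readMax(unsigned* maxSymbolValuePtr); /* as in the *.h */
    size_t FOO_readMax(U32* maxSymbolValuePtr)       /* as in the *.c */
    {
        *maxSymbolValuePtr = 255;
        return 0;
    }

    /* pattern 2 : the prototype takes unsigned*, the caller passes U32*.
     * Again only diagnosed on platforms where the two types differ. */
    void caller(void)
    {
        U32 max = 0;
        FOO_readMax(&max);
    }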

These fixes are useful.

However, the bulk of the changes is about %u formatting,
which requires an argument of type unsigned,
but generally received U32 values instead,
often just for brevity (U32 is shorter than unsigned).
These changes are generally minor, or even annoying.
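The typical shape of these changes, as a small standalone sketch mirroring the pattern visible in the diff below :

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t U32;   /* assumption : mirrors mem.h */

    static void report(U32 srcSize)
    {
        /* before : printf("srcSize = %u \n", srcSize);
         *          fine where U32 == unsigned, -Wformat warning where it is not */
        printf("srcSize = %u \n", (unsigned)srcSize);   /* after : explicit cast */
    }

    int main(void)
    {
        report(123);
        return 0;
    }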

As a consequence, the amount of code changed is larger than I would expect for such a patch.

Testing is also a pain :
it requires manually modifying `mem.h`,
in order to lie about `U32`
and force it to be, typically, an `unsigned long`.
On a 64-bit system, this breaks the equivalence unsigned == U32.
Unfortunately, it also breaks a few static_assert() controlling structure sizes,
so it additionally requires modifying `debug.h` to make `static_assert()` a noop.
And then reverting all these changes afterwards.
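Roughly, the temporary (never committed) hack looks like this ; the exact contents of mem.h and debug.h may differ, this only shows the idea :

    /* mem.h : lie about U32 so it differs from unsigned on a 64-bit system */
    typedef unsigned long U32;       /* instead of : typedef uint32_t U32; */

    /* debug.h : neutralize the size-controlling asserts broken by the lie */
    #define static_assert(c, msg)    /* deliberately a noop for this test */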

So it's inconvenient, and as a consequence,
this property is currently not checked during CI tests.
Therefore, these problems can emerge again in the future.

I wonder if it is worth enforcing the distinction U32 != unsigned in CI tests.
It's another coding restriction, adding more friction during merge tests,
since most platforms don't need this distinction (hence contributors will not see it locally),
and while it can matter in theory, the number of impacted platforms seems minimal.
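If such a check were added, one possible shape (hypothetical, not part of this patch ; the macro name is invented) would be a CI-only switch letting mem.h pick a deliberately distinct type, so that the confusion gets diagnosed without a MIPS toolchain :

    #include <stdint.h>

    /* hypothetical CI-only override in mem.h */
    #if defined(ZSTD_CI_FORCE_U32_DISTINCT)
      typedef unsigned long U32;   /* distinct from unsigned on LP64 targets
                                    * (64-bit wide, hence the static_assert() issue above) */
    #else
      typedef uint32_t U32;
    #endif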

Thoughts ?
Yann Collet, 2018-12-21 16:19:44 -08:00
parent 8f35c7f94c, commit ededcfca57
30 changed files with 401 additions and 388 deletions

@@ -120,7 +120,7 @@ int main(int argc, const char** argv)
 DISPLAYLEVEL(4, "Compressible data Generator \n");
 if (probaU32!=COMPRESSIBILITY_DEFAULT)
 DISPLAYLEVEL(3, "Compressibility : %i%%\n", probaU32);
-DISPLAYLEVEL(3, "Seed = %u \n", seed);
+DISPLAYLEVEL(3, "Seed = %u \n", (unsigned)seed);
 RDG_genStdout(size, (double)probaU32/100, litProba, seed);
 DISPLAYLEVEL(1, "\n");

@@ -141,7 +141,7 @@ size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs)
 size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs,
 int compressionLevel,
 int checksumFlag,
-U32 maxFrameSize)
+unsigned maxFrameSize)
 {
 zcs->framelog.size = 0;
 zcs->frameCSize = 0;

@@ -201,7 +201,7 @@ size_t ZSTD_seekable_free(ZSTD_seekable* zs)
 * Performs a binary search to find the last frame with a decompressed offset
 * <= pos
 * @return : the frame's index */
-U32 ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long pos)
+unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long pos)
 {
 U32 lo = 0;
 U32 hi = (U32)zs->seekTable.tableLen;
@@ -222,32 +222,32 @@ U32 ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long
 return lo;
 }
-U32 ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs)
+unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs)
 {
 assert(zs->seekTable.tableLen <= UINT_MAX);
-return (U32)zs->seekTable.tableLen;
+return (unsigned)zs->seekTable.tableLen;
 }
-unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex)
 {
 if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
 return zs->seekTable.entries[frameIndex].cOffset;
 }
-unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex)
 {
 if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
 return zs->seekTable.entries[frameIndex].dOffset;
 }
-size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex)
 {
 if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
 return zs->seekTable.entries[frameIndex + 1].cOffset -
 zs->seekTable.entries[frameIndex].cOffset;
 }
-size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex)
 {
 if (frameIndex > zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
 return zs->seekTable.entries[frameIndex + 1].dOffset -
@@ -447,7 +447,7 @@ size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsign
 return len;
 }
-size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, U32 frameIndex)
+size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex)
 {
 if (frameIndex >= zs->seekTable.tableLen) {
 return ERROR(frameIndex_tooLarge);

@@ -389,7 +389,7 @@ MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
 * Read (consume) next n bits from local register and update.
 * Pay attention to not read more than nbBits contained into local register.
 * @return : extracted value. */
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
 {
 size_t const value = BIT_lookBits(bitD, nbBits);
 BIT_skipBits(bitD, nbBits);
@@ -398,7 +398,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
 /*! BIT_readBitsFast() :
 * unsafe version; only works only if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
 {
 size_t const value = BIT_lookBitsFast(bitD, nbBits);
 assert(nbBits >= 1);

@@ -512,7 +512,7 @@ MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
 const U32 tableLog = MEM_read16(ptr);
 statePtr->value = (ptrdiff_t)1<<tableLog;
 statePtr->stateTable = u16ptr+2;
-statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
+statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
 statePtr->stateLog = tableLog;
 }
@@ -531,7 +531,7 @@ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U3
 }
 }
-MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
 {
 FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
 const U16* const stateTable = (const U16*)(statePtr->stateTable);

@@ -232,7 +232,7 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
 #define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
 #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
 size_t HUF_buildCTable_wksp (HUF_CElt* tree,
-const U32* count, U32 maxSymbolValue, U32 maxNbBits,
+const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
 void* workSpace, size_t wkspSize);
 /*! HUF_readStats() :

@@ -658,7 +658,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
 BYTE* op = ostart;
 BYTE* const oend = ostart + dstSize;
-U32 count[FSE_MAX_SYMBOL_VALUE+1];
+unsigned count[FSE_MAX_SYMBOL_VALUE+1];
 S16 norm[FSE_MAX_SYMBOL_VALUE+1];
 FSE_CTable* CTable = (FSE_CTable*)workSpace;
 size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);

@@ -88,13 +88,13 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
 BYTE* op = ostart;
 BYTE* const oend = ostart + dstSize;
-U32 maxSymbolValue = HUF_TABLELOG_MAX;
+unsigned maxSymbolValue = HUF_TABLELOG_MAX;
 U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
 FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
 BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
-U32 count[HUF_TABLELOG_MAX+1];
+unsigned count[HUF_TABLELOG_MAX+1];
 S16 norm[HUF_TABLELOG_MAX+1];
 /* init conditions */
@@ -134,7 +134,7 @@ struct HUF_CElt_s {
 `CTable` : Huffman tree to save, using huf representation.
 @return : size of saved CTable */
 size_t HUF_writeCTable (void* dst, size_t maxDstSize,
-const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
+const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
 {
 BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
 BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
@@ -169,7 +169,7 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize,
 }
-size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
 {
 BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
 U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
@@ -315,7 +315,7 @@ typedef struct {
 U32 current;
 } rankPos;
-static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
+static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
 {
 rankPos rank[32];
 U32 n;
@@ -347,7 +347,7 @@ static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
 */
 #define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
 typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
 {
 nodeElt* const huffNode0 = (nodeElt*)workSpace;
 nodeElt* const huffNode = huffNode0+1;
@@ -421,7 +421,7 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValu
 * @return : maxNbBits
 * Note : count is used before tree is written, so they can safely overlap
 */
-size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
+size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
 {
 huffNodeTable nodeTable;
 return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
@@ -629,7 +629,7 @@ static size_t HUF_compressCTable_internal(
 }
 typedef struct {
-U32 count[HUF_SYMBOLVALUE_MAX + 1];
+unsigned count[HUF_SYMBOLVALUE_MAX + 1];
 HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
 huffNodeTable nodeTable;
 } HUF_compress_tables_t;
@@ -689,9 +689,10 @@ HUF_compress_internal (void* dst, size_t dstSize,
 /* Build Huffman Tree */
 huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
-{ CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
+{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
 maxSymbolValue, huffLog,
-table->nodeTable, sizeof(table->nodeTable)) );
+table->nodeTable, sizeof(table->nodeTable));
+CHECK_F(maxBits);
 huffLog = (U32)maxBits;
 /* Zero unused symbols in CTable, so we can check it for validity */
 memset(table->CTable + (maxSymbolValue + 1), 0,

@@ -1266,7 +1266,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
 /* opt parser space */
 if (forCCtx && (cParams->strategy >= ZSTD_btopt)) {
 DEBUGLOG(4, "reserving optimal parser space");
-ms->opt.litFreq = (U32*)ptr;
+ms->opt.litFreq = (unsigned*)ptr;
 ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
 ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
 ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
@@ -1397,7 +1397,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
 if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
 zc->appliedParams.fParams.contentSizeFlag = 0;
 DEBUGLOG(4, "pledged content size : %u ; flag : %u",
-(U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+(unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
 zc->blockSize = blockSize;
 XXH64_reset(&zc->xxhState, 0);
@@ -1618,7 +1618,8 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
 ZSTD_buffered_policy_e zbuff)
 {
-DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
+DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+(unsigned)pledgedSrcSize);
 if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
 return ZSTD_resetCCtx_byAttachingCDict(
@@ -2151,7 +2152,7 @@ ZSTD_selectEncodingType(
 assert(!ZSTD_isError(NCountCost));
 assert(compressedCost < ERROR(maxCode));
 DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
-(U32)basicCost, (U32)repeatCost, (U32)compressedCost);
+(unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
 if (basicCost <= repeatCost && basicCost <= compressedCost) {
 DEBUGLOG(5, "Selected set_basic");
 assert(isDefaultAllowed);
@@ -2173,7 +2174,7 @@ ZSTD_selectEncodingType(
 MEM_STATIC size_t
 ZSTD_buildCTable(void* dst, size_t dstCapacity,
 FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
-U32* count, U32 max,
+unsigned* count, U32 max,
 const BYTE* codeTable, size_t nbSeq,
 const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
 const FSE_CTable* prevCTable, size_t prevCTableSize,
@@ -2264,9 +2265,9 @@ ZSTD_encodeSequences_body(
 U32 const ofBits = ofCode;
 U32 const mlBits = ML_bits[mlCode];
 DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
-sequences[n].litLength,
-sequences[n].matchLength + MINMATCH,
-sequences[n].offset);
+(unsigned)sequences[n].litLength,
+(unsigned)sequences[n].matchLength + MINMATCH,
+(unsigned)sequences[n].offset);
 /* 32b*/ /* 64b*/
 /* (7)*/ /* (7)*/
 FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
@@ -2380,7 +2381,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
 {
 const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
 ZSTD_strategy const strategy = cctxParams->cParams.strategy;
-U32 count[MaxSeq+1];
+unsigned count[MaxSeq+1];
 FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
 FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
 FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
@@ -2436,7 +2437,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
 /* convert length/distances into codes */
 ZSTD_seqToCodes(seqStorePtr);
 /* build CTable for Literal Lengths */
-{ U32 max = MaxLL;
+{ unsigned max = MaxLL;
 size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
 DEBUGLOG(5, "Building LL table");
 nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
@@ -2457,7 +2458,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
 op += countSize;
 } }
 /* build CTable for Offsets */
-{ U32 max = MaxOff;
+{ unsigned max = MaxOff;
 size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
 /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
 ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
@@ -2479,7 +2480,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
 op += countSize;
 } }
 /* build CTable for MatchLengths */
-{ U32 max = MaxML;
+{ unsigned max = MaxML;
 size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
 DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
 nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
@@ -2628,7 +2629,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
 ZSTD_matchState_t* const ms = &zc->blockState.matchState;
 size_t cSize;
 DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
-(U32)dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
+(unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate);
 assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
 /* Assert that we have correctly flushed the ctx params into the ms's copy */
@@ -2743,7 +2744,7 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
 U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
 assert(cctx->appliedParams.cParams.windowLog <= 31);
-DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
+DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
 if (cctx->appliedParams.fParams.checksumFlag && srcSize)
 XXH64_update(&cctx->xxhState, src, srcSize);
@@ -2791,7 +2792,7 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
 assert(dstCapacity >= cSize);
 dstCapacity -= cSize;
 DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
-(U32)cSize);
+(unsigned)cSize);
 } }
 if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
@@ -2816,7 +2817,7 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
 assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
 if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
 DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
-!params.fParams.noDictIDFlag, dictID, dictIDSizeCode);
+!params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
 if (params.format == ZSTD_f_zstd1) {
 MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
@@ -2880,7 +2881,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
 size_t fhSize = 0;
 DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
-cctx->stage, (U32)srcSize);
+cctx->stage, (unsigned)srcSize);
 if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
 if (frame && (cctx->stage==ZSTDcs_init)) {
@@ -2917,7 +2918,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
 }
 }
-DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
+DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
 { size_t const cSize = frame ?
 ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
 ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
@@ -2929,7 +2930,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
 ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
 if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
 DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
-(U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+(unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
 return ERROR(srcSize_wrong);
 }
 }
@@ -2941,7 +2942,7 @@ size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
 void* dst, size_t dstCapacity,
 const void* src, size_t srcSize)
 {
-DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize);
+DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
 return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
 }
@@ -3238,7 +3239,7 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
 ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
 ZSTD_CCtx_params const cctxParams =
 ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
-DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
+DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
 return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
 cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
 }
@@ -3282,7 +3283,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
 if (cctx->appliedParams.fParams.checksumFlag) {
 U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
 if (dstCapacity<4) return ERROR(dstSize_tooSmall);
-DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
+DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
 MEM_writeLE32(op, checksum);
 op += 4;
 }
@@ -3308,7 +3309,7 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
 DEBUGLOG(4, "end of frame : controlling src size");
 if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
 DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
-(U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+(unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
 return ERROR(srcSize_wrong);
 } }
 return cSize + endResult;
@@ -3354,7 +3355,7 @@ size_t ZSTD_compress_advanced_internal(
 const void* dict,size_t dictSize,
 ZSTD_CCtx_params params)
 {
-DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (U32)srcSize);
+DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
 CHECK_F( ZSTD_compressBegin_internal(cctx,
 dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
 params, srcSize, ZSTDb_not_buffered) );
@@ -3378,7 +3379,7 @@ size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
 const void* src, size_t srcSize,
 int compressionLevel)
 {
-DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
+DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
 assert(cctx != NULL);
 return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
 }
@@ -3404,7 +3405,7 @@ size_t ZSTD_estimateCDictSize_advanced(
 size_t dictSize, ZSTD_compressionParameters cParams,
 ZSTD_dictLoadMethod_e dictLoadMethod)
 {
-DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
+DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
 return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
 + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
 }
@@ -3418,7 +3419,7 @@ size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
 {
 if (cdict==NULL) return 0; /* support sizeof on NULL */
-DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
+DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
 return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
 }
@@ -3429,7 +3430,7 @@ static size_t ZSTD_initCDict_internal(
 ZSTD_dictContentType_e dictContentType,
 ZSTD_compressionParameters cParams)
 {
-DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (U32)dictContentType);
+DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
 assert(!ZSTD_checkCParams(cParams));
 cdict->matchState.cParams = cParams;
 if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
@@ -3479,7 +3480,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
 ZSTD_dictContentType_e dictContentType,
 ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
 {
-DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
+DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
 if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
 { ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
@@ -3560,7 +3561,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
 void* ptr;
 if ((size_t)workspace & 7) return NULL; /* 8-aligned */
 DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
-(U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
+(unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
 if (workspaceSize < neededSize) return NULL;
 if (dictLoadMethod == ZSTD_dlm_byCopy) {
@@ -3720,7 +3721,7 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
 {
 ZSTD_CCtx_params params = zcs->requestedParams;
-DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
+DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
 if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
 params.fParams.contentSizeFlag = 1;
 return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
@@ -3740,7 +3741,7 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
 assert(!((dict) && (cdict))); /* either dict or cdict, not both */
 if (dict && dictSize >= 8) {
-DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
+DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
 if (zcs->staticSize) { /* static CCtx : never uses malloc */
 /* incompatible with internal cdict creation */
 return ERROR(memory_allocation);
@@ -3799,7 +3800,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
 ZSTD_parameters params, unsigned long long pledgedSrcSize)
 {
 DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
-(U32)pledgedSrcSize, params.fParams.contentSizeFlag);
+(unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
 CHECK_F( ZSTD_checkCParams(params.cParams) );
 if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
 zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
@@ -3860,7 +3861,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
 U32 someMoreWork = 1;
 /* check expectations */
-DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
+DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
 assert(zcs->inBuff != NULL);
 assert(zcs->inBuffSize > 0);
 assert(zcs->outBuff != NULL);
@@ -3882,7 +3883,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
 /* shortcut to compression pass directly into output buffer */
 size_t const cSize = ZSTD_compressEnd(zcs,
 op, oend-op, ip, iend-ip);
-DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (U32)cSize);
+DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
 if (ZSTD_isError(cSize)) return cSize;
 ip = iend;
 op += cSize;
@@ -3931,7 +3932,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
 if (zcs->inBuffTarget > zcs->inBuffSize)
 zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
-(U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
+(unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
 if (!lastBlock)
 assert(zcs->inBuffTarget <= zcs->inBuffSize);
 zcs->inToCompress = zcs->inBuffPos;
@@ -3955,7 +3956,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
 size_t const flushed = ZSTD_limitCopy(op, oend-op,
 zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
-(U32)toFlush, (U32)(oend-op), (U32)flushed);
+(unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
 op += flushed;
 zcs->outBuffFlushedSize += flushed;
 if (toFlush!=flushed) {
@@ -4010,7 +4011,7 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
 ZSTD_inBuffer* input,
 ZSTD_EndDirective endOp)
 {
-DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (U32)endOp);
+DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
 /* check conditions */
 if (output->pos > output->size) return ERROR(GENERIC);
 if (input->pos > input->size) return ERROR(GENERIC);
@@ -4137,7 +4138,7 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
 { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
 size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
 size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
-DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
+DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
 return toFlush;
 }
 }

@@ -90,10 +90,10 @@ typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
 typedef struct {
 /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
-U32* litFreq; /* table of literals statistics, of size 256 */
-U32* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
-U32* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
-U32* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+unsigned* litFreq; /* table of literals statistics, of size 256 */
+unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
 ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
 ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
@@ -689,13 +689,13 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
 U32 const blockEndIdx = (U32)((BYTE const*)srcEnd - window->base);
 U32 loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
 DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u",
-blockEndIdx, maxDist);
+(unsigned)blockEndIdx, (unsigned)maxDist);
 if (blockEndIdx > maxDist + loadedDictEnd) {
 U32 const newLowLimit = blockEndIdx - maxDist;
 if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
 if (window->dictLimit < window->lowLimit) {
 DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
-window->dictLimit, window->lowLimit);
+(unsigned)window->dictLimit, (unsigned)window->lowLimit);
 window->dictLimit = window->lowLimit;
 }
 if (loadedDictEndPtr)

@@ -72,10 +72,10 @@ static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
 /* ZSTD_downscaleStat() :
 * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
 * return the resulting sum of elements */
-static U32 ZSTD_downscaleStat(U32* table, U32 lastEltIndex, int malus)
+static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
 {
 U32 s, sum=0;
-DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", lastEltIndex+1);
+DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
 assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
 for (s=0; s<lastEltIndex+1; s++) {
 table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
@@ -1041,7 +1041,7 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
 U32 const offCode = opt[storePos].off;
 U32 const advance = llen + mlen;
 DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
-anchor - istart, llen, mlen);
+anchor - istart, (unsigned)llen, (unsigned)mlen);
 if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
 assert(storePos == storeEnd); /* must be last sequence */
@@ -1089,7 +1089,7 @@ size_t ZSTD_compressBlock_btopt(
 /* used in 2-pass strategy */
-static U32 ZSTD_upscaleStat(U32* table, U32 lastEltIndex, int bonus)
+static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
 {
 U32 s, sum=0;
 assert(ZSTD_FREQ_DIV+bonus >= 0);

@@ -671,7 +671,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
 { U32 const magicNumber = MEM_readLE32(src);
 DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
-(U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
+(unsigned)magicNumber, ZSTD_MAGICNUMBER);
 if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
 size_t const skippableSize = readSkippableFrameSize(src, srcSize);
 if (ZSTD_isError(skippableSize))
@@ -789,7 +789,7 @@ static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skip
 * or an error code, which can be tested using ZSTD_isError() */
 size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
+DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
 /* Sanity check */
 if (srcSize != dctx->expected)
 return ERROR(srcSize_wrong); /* not allowed */
@@ -870,12 +870,12 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
 return ERROR(corruption_detected);
 }
 if (ZSTD_isError(rSize)) return rSize;
-DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
+DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
 dctx->decodedSize += rSize;
 if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
 if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
-DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
+DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
 if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
 if (dctx->decodedSize != dctx->fParams.frameContentSize) {
 return ERROR(corruption_detected);
@@ -899,7 +899,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
 assert(srcSize == 4); /* guaranteed by dctx->expected */
 { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
 U32 const check32 = MEM_readLE32(src);
-DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
+DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
 if (check32 != h32) return ERROR(checksum_wrong);
 dctx->expected = 0;
 dctx->stage = ZSTDds_getFrameHeaderSize;
@@ -969,7 +969,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
 }
 { short offcodeNCount[MaxOff+1];
-U32 offcodeMaxValue = MaxOff, offcodeLog;
+unsigned offcodeMaxValue = MaxOff, offcodeLog;
 size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
 if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
 if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);

@@ -427,7 +427,7 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
 * @return : nb bytes read from src,
 * or an error code if it fails */
 static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
-symbolEncodingType_e type, U32 max, U32 maxLog,
+symbolEncodingType_e type, unsigned max, U32 maxLog,
 const void* src, size_t srcSize,
 const U32* baseValue, const U32* nbAdditionalBits,
 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
@@ -458,7 +458,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
 }
 return 0;
 case set_compressed :
-{ U32 tableLog;
+{ unsigned tableLog;
 S16 norm[MaxSeq+1];
 size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
 if (FSE_isError(headerSize)) return ERROR(corruption_detected);

@@ -39,7 +39,7 @@
 /*-*************************************
 * Constants
 ***************************************/
-#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
 #define DEFAULT_SPLITPOINT 1.0
 /*-*************************************
@@ -543,7 +543,7 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
 if (totalSamplesSize < MAX(d, sizeof(U64)) ||
 totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
 DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
-(U32)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
+(unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
 return 0;
 }
 /* Check if there are at least 5 training samples */
@@ -559,9 +559,9 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
 /* Zero the context */
 memset(ctx, 0, sizeof(*ctx));
 DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
-(U32)trainingSamplesSize);
+(unsigned)trainingSamplesSize);
 DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
-(U32)testSamplesSize);
+(unsigned)testSamplesSize);
 ctx->samples = samples;
 ctx->samplesSizes = samplesSizes;
 ctx->nbSamples = nbSamples;
@@ -639,11 +639,11 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
 /* Divide the data up into epochs of equal size.
 * We will select at least one segment from each epoch.
 */
-const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k / 4));
-const U32 epochSize = (U32)(ctx->suffixSize / epochs);
+const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k / 4));
+const unsigned epochSize = (U32)(ctx->suffixSize / epochs);
 size_t epoch;
-DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
-epochSize);
+DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+epochs, epochSize);
 /* Loop through the epochs until there are no more segments or the dictionary
 * is full.
 */
@@ -670,7 +670,7 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
 memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
 DISPLAYUPDATE(
 2, "\r%u%% ",
-(U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+(unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
 }
 DISPLAYLEVEL(2, "\r%79s\r", "");
 return tail;
@@ -722,7 +722,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
 samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
 if (!ZSTD_isError(dictionarySize)) {
 DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
-(U32)dictionarySize);
+(unsigned)dictionarySize);
 }
 COVER_ctx_destroy(&ctx);
 COVER_map_destroy(&activeDmers);
@@ -1056,7 +1056,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
 }
 /* Print status */
 LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
-(U32)((iteration * 100) / kIterations));
+(unsigned)((iteration * 100) / kIterations));
 ++iteration;
 }
 COVER_best_wait(&best);

@@ -20,7 +20,7 @@
 /*-*************************************
 * Constants
 ***************************************/
-#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
 #define FASTCOVER_MAX_F 31
 #define FASTCOVER_MAX_ACCEL 10
 #define DEFAULT_SPLITPOINT 0.75
@@ -309,7 +309,7 @@ FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
 if (totalSamplesSize < MAX(d, sizeof(U64)) ||
 totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
 DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
-(U32)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
+(unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
 return 0;
 }
@@ -328,9 +328,9 @@ FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
 /* Zero the context */
 memset(ctx, 0, sizeof(*ctx));
 DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
-(U32)trainingSamplesSize);
+(unsigned)trainingSamplesSize);
 DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
-(U32)testSamplesSize);
+(unsigned)testSamplesSize);
 ctx->samples = samples;
 ctx->samplesSizes = samplesSizes;
@@ -389,11 +389,11 @@ FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
 /* Divide the data up into epochs of equal size.
 * We will select at least one segment from each epoch.
 */
-const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
-const U32 epochSize = (U32)(ctx->nbDmers / epochs);
+const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
+const unsigned epochSize = (U32)(ctx->nbDmers / epochs);
 size_t epoch;
-DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
-epochSize);
+DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+epochs, epochSize);
 /* Loop through the epochs until there are no more segments or the dictionary
 * is full.
 */
@@ -423,7 +423,7 @@ FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
 memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
 DISPLAYUPDATE(
 2, "\r%u%% ",
-(U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+(unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
 }
 DISPLAYLEVEL(2, "\r%79s\r", "");
 return tail;
@@ -577,7 +577,7 @@ ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
 samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
 if (!ZSTD_isError(dictionarySize)) {
 DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
-(U32)dictionarySize);
+(unsigned)dictionarySize);
 }
 FASTCOVER_ctx_destroy(&ctx);
 free(segmentFreqs);
@@ -702,7 +702,7 @@ ZDICT_optimizeTrainFromBuffer_fastCover(
 }
 /* Print status */
 LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
-(U32)((iteration * 100) / kIterations));
+(unsigned)((iteration * 100) / kIterations));
 ++iteration;
 }
 COVER_best_wait(&best);


@ -260,7 +260,7 @@ static dictItem ZDICT_analyzePos(
U32 refinedEnd = end; U32 refinedEnd = end;
DISPLAYLEVEL(4, "\n"); DISPLAYLEVEL(4, "\n");
DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (U32)(end-start), MINMATCHLENGTH, (U32)pos); DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
DISPLAYLEVEL(4, "\n"); DISPLAYLEVEL(4, "\n");
for (mml = MINMATCHLENGTH ; ; mml++) { for (mml = MINMATCHLENGTH ; ; mml++) {
@ -342,7 +342,7 @@ static dictItem ZDICT_analyzePos(
savings[i] = savings[i-1] + (lengthList[i] * (i-3)); savings[i] = savings[i-1] + (lengthList[i] * (i-3));
DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n", DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n",
(U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength); (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
solution.pos = (U32)pos; solution.pos = (U32)pos;
solution.length = (U32)maxLength; solution.length = (U32)maxLength;
@ -497,7 +497,7 @@ static U32 ZDICT_dictSize(const dictItem* dictList)
static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize, static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */ const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */
const size_t* fileSizes, unsigned nbFiles, const size_t* fileSizes, unsigned nbFiles,
U32 minRatio, U32 notificationLevel) unsigned minRatio, U32 notificationLevel)
{ {
int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0)); int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
int* const suffix = suffix0+1; int* const suffix = suffix0+1;
@ -523,11 +523,11 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
memset(doneMarks, 0, bufferSize+16); memset(doneMarks, 0, bufferSize+16);
/* limit sample set size (divsufsort limitation)*/ /* limit sample set size (divsufsort limitation)*/
if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20)); if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles]; while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
/* sort */ /* sort */
DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20)); DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
{ int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0); { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; } if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
} }
@ -589,7 +589,7 @@ typedef struct
#define MAXREPOFFSET 1024 #define MAXREPOFFSET 1024
static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params, static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets, unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
const void* src, size_t srcSize, const void* src, size_t srcSize,
U32 notificationLevel) U32 notificationLevel)
{ {
@ -602,7 +602,7 @@ static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
} }
cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; } if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
if (cSize) { /* if == 0; block is not compressible */ if (cSize) { /* if == 0; block is not compressible */
const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc); const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
@ -671,7 +671,7 @@ static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val,
* rewrite `countLit` to contain a mostly flat but still compressible distribution of literals. * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
* necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode. * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
*/ */
static void ZDICT_flatLit(U32* countLit) static void ZDICT_flatLit(unsigned* countLit)
{ {
int u; int u;
for (u=1; u<256; u++) countLit[u] = 2; for (u=1; u<256; u++) countLit[u] = 2;
@ -687,14 +687,14 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
const void* dictBuffer, size_t dictBufferSize, const void* dictBuffer, size_t dictBufferSize,
unsigned notificationLevel) unsigned notificationLevel)
{ {
U32 countLit[256]; unsigned countLit[256];
HUF_CREATE_STATIC_CTABLE(hufTable, 255); HUF_CREATE_STATIC_CTABLE(hufTable, 255);
U32 offcodeCount[OFFCODE_MAX+1]; unsigned offcodeCount[OFFCODE_MAX+1];
short offcodeNCount[OFFCODE_MAX+1]; short offcodeNCount[OFFCODE_MAX+1];
U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB)); U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
U32 matchLengthCount[MaxML+1]; unsigned matchLengthCount[MaxML+1];
short matchLengthNCount[MaxML+1]; short matchLengthNCount[MaxML+1];
U32 litLengthCount[MaxLL+1]; unsigned litLengthCount[MaxLL+1];
short litLengthNCount[MaxLL+1]; short litLengthNCount[MaxLL+1];
U32 repOffset[MAXREPOFFSET]; U32 repOffset[MAXREPOFFSET];
offsetCount_t bestRepOffset[ZSTD_REP_NUM+1]; offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
@ -983,33 +983,33 @@ size_t ZDICT_trainFromBuffer_unsafe_legacy(
/* display best matches */ /* display best matches */
if (params.zParams.notificationLevel>= 3) { if (params.zParams.notificationLevel>= 3) {
U32 const nb = MIN(25, dictList[0].pos); unsigned const nb = MIN(25, dictList[0].pos);
U32 const dictContentSize = ZDICT_dictSize(dictList); unsigned const dictContentSize = ZDICT_dictSize(dictList);
U32 u; unsigned u;
DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", dictList[0].pos-1, dictContentSize); DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);
DISPLAYLEVEL(3, "list %u best segments \n", nb-1); DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
for (u=1; u<nb; u++) { for (u=1; u<nb; u++) {
U32 const pos = dictList[u].pos; unsigned const pos = dictList[u].pos;
U32 const length = dictList[u].length; unsigned const length = dictList[u].length;
U32 const printedLength = MIN(40, length); U32 const printedLength = MIN(40, length);
if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) { if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
free(dictList); free(dictList);
return ERROR(GENERIC); /* should never happen */ return ERROR(GENERIC); /* should never happen */
} }
DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |", DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
u, length, pos, dictList[u].savings); u, length, pos, (unsigned)dictList[u].savings);
ZDICT_printHex((const char*)samplesBuffer+pos, printedLength); ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
DISPLAYLEVEL(3, "| \n"); DISPLAYLEVEL(3, "| \n");
} } } }
/* create dictionary */ /* create dictionary */
{ U32 dictContentSize = ZDICT_dictSize(dictList); { unsigned dictContentSize = ZDICT_dictSize(dictList);
if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */ if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */
if (dictContentSize < targetDictSize/4) { if (dictContentSize < targetDictSize/4) {
DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize); DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
if (samplesBuffSize < 10 * targetDictSize) if (samplesBuffSize < 10 * targetDictSize)
DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20)); DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
if (minRep > MINRATIO) { if (minRep > MINRATIO) {
DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1); DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n"); DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
@ -1017,9 +1017,9 @@ size_t ZDICT_trainFromBuffer_unsafe_legacy(
} }
if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) { if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
U32 proposedSelectivity = selectivity-1; unsigned proposedSelectivity = selectivity-1;
while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; } while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize); DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity); DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n"); DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n");
} }
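
Beyond the display casts, this file also re-types internal helpers (ZDICT_countEStats, ZDICT_flatLit, the count arrays in ZDICT_analyzeEntropy) so that prototypes, definitions and the buffers passed to them all agree on `unsigned`. A reduced sketch of why that agreement matters (hypothetical function names, not the real ZDICT code):

/* Hypothetical sketch : a prototype published with `unsigned*` must be
 * implemented and called with `unsigned`, not U32 ; the two only happen
 * to be compatible when U32 and unsigned are the same type. */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t U32;

/* as it would appear in a header : */
void count_bytes(unsigned* count, const unsigned char* src, size_t srcSize);

/* implementation : same parameter type as the prototype */
void count_bytes(unsigned* count, const unsigned char* src, size_t srcSize)
{
    size_t i;
    for (i = 0; i < srcSize; i++) count[src[i]]++;
}

void caller_example(const unsigned char* src, size_t srcSize)
{
    unsigned count[256] = { 0 };   /* declared `unsigned`, matching the API */
    count_bytes(count, src, srcSize);
}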


@ -836,7 +836,7 @@ MEM_STATIC void BITv05_skipBits(BITv05_DStream_t* bitD, U32 nbBits)
bitD->bitsConsumed += nbBits; bitD->bitsConsumed += nbBits;
} }
MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, U32 nbBits) MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits)
{ {
size_t value = BITv05_lookBits(bitD, nbBits); size_t value = BITv05_lookBits(bitD, nbBits);
BITv05_skipBits(bitD, nbBits); BITv05_skipBits(bitD, nbBits);
@ -845,7 +845,7 @@ MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, U32 nbBits)
/*!BITv05_readBitsFast : /*!BITv05_readBitsFast :
* unsafe version; only works only if nbBits >= 1 */ * unsafe version; only works only if nbBits >= 1 */
MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, U32 nbBits) MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
{ {
size_t value = BITv05_lookBitsFast(bitD, nbBits); size_t value = BITv05_lookBitsFast(bitD, nbBits);
BITv05_skipBits(bitD, nbBits); BITv05_skipBits(bitD, nbBits);
@ -1162,7 +1162,7 @@ MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr)
/* ************************************************************** /* **************************************************************
* Complex types * Complex types
****************************************************************/ ****************************************************************/
typedef U32 DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)]; typedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
/* ************************************************************** /* **************************************************************
@ -2191,7 +2191,7 @@ static void HUFv05_fillDTableX4(HUFv05_DEltX4* DTable, const U32 targetLog,
} }
} }
size_t HUFv05_readDTableX4 (U32* DTable, const void* src, size_t srcSize) size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize)
{ {
BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1]; BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1]; sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];
@ -2205,7 +2205,7 @@ size_t HUFv05_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
void* dtPtr = DTable; void* dtPtr = DTable;
HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1; HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;
HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned)); /* if compilation fails here, assertion is false */
if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
//memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */
@ -2332,7 +2332,7 @@ static inline size_t HUFv05_decodeStreamX4(BYTE* p, BITv05_DStream_t* bitDPtr, B
size_t HUFv05_decompress1X4_usingDTable( size_t HUFv05_decompress1X4_usingDTable(
void* dst, size_t dstSize, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize, const void* cSrc, size_t cSrcSize,
const U32* DTable) const unsigned* DTable)
{ {
const BYTE* const istart = (const BYTE*) cSrc; const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst; BYTE* const ostart = (BYTE*) dst;
@ -2375,7 +2375,7 @@ size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t
size_t HUFv05_decompress4X4_usingDTable( size_t HUFv05_decompress4X4_usingDTable(
void* dst, size_t dstSize, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize, const void* cSrc, size_t cSrcSize,
const U32* DTable) const unsigned* DTable)
{ {
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
@ -2999,7 +2999,7 @@ static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t
const BYTE* ip = istart; const BYTE* ip = istart;
const BYTE* const iend = istart + srcSize; const BYTE* const iend = istart + srcSize;
U32 LLtype, Offtype, MLtype; U32 LLtype, Offtype, MLtype;
U32 LLlog, Offlog, MLlog; unsigned LLlog, Offlog, MLlog;
size_t dumpsLength; size_t dumpsLength;
/* check */ /* check */
@ -3057,7 +3057,7 @@ static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t
break; break;
case FSEv05_ENCODING_DYNAMIC : case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */ default : /* impossible */
{ U32 max = MaxLL; { unsigned max = MaxLL;
headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip); headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC); if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (LLlog > LLFSEv05Log) return ERROR(corruption_detected); if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);
@ -3081,7 +3081,7 @@ static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t
break; break;
case FSEv05_ENCODING_DYNAMIC : case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */ default : /* impossible */
{ U32 max = MaxOff; { unsigned max = MaxOff;
headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip); headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC); if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (Offlog > OffFSEv05Log) return ERROR(corruption_detected); if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);
@ -3105,7 +3105,7 @@ static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t
break; break;
case FSEv05_ENCODING_DYNAMIC : case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */ default : /* impossible */
{ U32 max = MaxML; { unsigned max = MaxML;
headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip); headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC); if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (MLlog > MLFSEv05Log) return ERROR(corruption_detected); if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);
@ -3305,9 +3305,9 @@ static size_t ZSTDv05_decompressSequences(
const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const litEnd = litPtr + dctx->litSize;
int nbSeq=0; int nbSeq=0;
const BYTE* dumps = NULL; const BYTE* dumps = NULL;
U32* DTableLL = dctx->LLTable; unsigned* DTableLL = dctx->LLTable;
U32* DTableML = dctx->MLTable; unsigned* DTableML = dctx->MLTable;
U32* DTableOffb = dctx->OffTable; unsigned* DTableOffb = dctx->OffTable;
const BYTE* const base = (const BYTE*) (dctx->base); const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase); const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
@ -3633,7 +3633,7 @@ static size_t ZSTDv05_loadEntropy(ZSTDv05_DCtx* dctx, const void* dict, size_t d
{ {
size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize; size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;
short offcodeNCount[MaxOff+1]; short offcodeNCount[MaxOff+1];
U32 offcodeMaxValue=MaxOff, offcodeLog; unsigned offcodeMaxValue=MaxOff, offcodeLog;
short matchlengthNCount[MaxML+1]; short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog; unsigned matchlengthMaxValue = MaxML, matchlengthLog;
short litlengthNCount[MaxLL+1]; short litlengthNCount[MaxLL+1];
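
In this legacy decoder the Huffman DTable is declared as an array of `unsigned` yet reinterpreted as 4-byte HUFv05_DEltX4 entries, so the compile-time size check above is retargeted from sizeof(U32) to sizeof(unsigned) to match the new element type. A generic sketch of that kind of guard (hypothetical names, in the same C89-friendly style as the *_STATIC_ASSERT macros):

/* Hypothetical sketch of a C89-compatible compile-time size check, in the
 * same spirit as HUFv05_STATIC_ASSERT : a division by zero inside an enum
 * initializer forces a compile error when the condition is false. */
#include <stdint.h>

typedef uint32_t U32;
typedef struct { uint16_t sequence; uint8_t nbBits; uint8_t length; } DEltX4;  /* 4 bytes */

#define MY_STATIC_ASSERT(c) { enum { my_static_assert = 1/(int)(!!(c)) }; }

void check_table_layout(void)
{
    /* the table is addressed as unsigned[], so check against unsigned : */
    MY_STATIC_ASSERT(sizeof(DEltX4) == sizeof(unsigned));
    /* still holds wherever U32 and unsigned coincide : */
    MY_STATIC_ASSERT(sizeof(DEltX4) == sizeof(U32));
}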


@ -140,7 +140,7 @@ BMK_runOutcome_t BMK_benchFunction(BMK_benchParams_t p,
if ((p.errorFn != NULL) && (p.errorFn(res))) { if ((p.errorFn != NULL) && (p.errorFn(res))) {
RETURN_QUIET_ERROR(BMK_runOutcome_error(res), RETURN_QUIET_ERROR(BMK_runOutcome_error(res),
"Function benchmark failed on block %u (of size %u) with error %i", "Function benchmark failed on block %u (of size %u) with error %i",
blockNb, (U32)p.srcSizes[blockNb], (int)res); blockNb, (unsigned)p.srcSizes[blockNb], (int)res);
} }
dstSize += res; dstSize += res;
} } } }


@ -430,7 +430,7 @@ BMK_benchMemAdvancedNoAlloc(
dctxprep.dictBufferSize = dictBufferSize; dctxprep.dictBufferSize = dictBufferSize;
DISPLAYLEVEL(2, "\r%70s\r", ""); /* blank line */ DISPLAYLEVEL(2, "\r%70s\r", ""); /* blank line */
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)srcSize); DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize);
while (!(compressionCompleted && decompressionCompleted)) { while (!(compressionCompleted && decompressionCompleted)) {
if (!compressionCompleted) { if (!compressionCompleted) {
@ -453,7 +453,7 @@ BMK_benchMemAdvancedNoAlloc(
{ int const ratioAccuracy = (ratio < 10.) ? 3 : 2; { int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s\r", DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s\r",
marks[markNb], displayName, marks[markNb], displayName,
(U32)srcSize, (U32)cSize, (unsigned)srcSize, (unsigned)cSize,
ratioAccuracy, ratio, ratioAccuracy, ratio,
benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT); benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT);
} }
@ -476,7 +476,7 @@ BMK_benchMemAdvancedNoAlloc(
{ int const ratioAccuracy = (ratio < 10.) ? 3 : 2; { int const ratioAccuracy = (ratio < 10.) ? 3 : 2;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s ,%6.1f MB/s \r", DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.*f),%6.*f MB/s ,%6.1f MB/s \r",
marks[markNb], displayName, marks[markNb], displayName,
(U32)srcSize, (U32)benchResult.cSize, (unsigned)srcSize, (unsigned)benchResult.cSize,
ratioAccuracy, ratio, ratioAccuracy, ratio,
benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT, benchResult.cSpeed < (10 MB) ? 2 : 1, (double)benchResult.cSpeed / MB_UNIT,
(double)benchResult.dSpeed / MB_UNIT); (double)benchResult.dSpeed / MB_UNIT);
@ -495,9 +495,9 @@ BMK_benchMemAdvancedNoAlloc(
displayName, (unsigned)crcOrig, (unsigned)crcCheck); displayName, (unsigned)crcOrig, (unsigned)crcCheck);
for (u=0; u<srcSize; u++) { for (u=0; u<srcSize; u++) {
if (((const BYTE*)srcBuffer)[u] != resultBuffer[u]) { if (((const BYTE*)srcBuffer)[u] != resultBuffer[u]) {
U32 segNb, bNb, pos; unsigned segNb, bNb, pos;
size_t bacc = 0; size_t bacc = 0;
DISPLAY("Decoding error at pos %u ", (U32)u); DISPLAY("Decoding error at pos %u ", (unsigned)u);
for (segNb = 0; segNb < nbBlocks; segNb++) { for (segNb = 0; segNb < nbBlocks; segNb++) {
if (bacc + srcSizes[segNb] > u) break; if (bacc + srcSizes[segNb] > u) break;
bacc += srcSizes[segNb]; bacc += srcSizes[segNb];
@ -668,7 +668,7 @@ static BMK_benchOutcome_t BMK_benchCLevel(const void* srcBuffer, size_t benchedS
if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */ if (displayLevel == 1 && !adv->additionalParam) /* --quiet mode */
DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n",
ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING,
(U32)benchedSize, adv->nbSeconds, (U32)(adv->blockSize>>10)); (unsigned)benchedSize, adv->nbSeconds, (unsigned)(adv->blockSize>>10));
return BMK_benchMemAdvanced(srcBuffer, benchedSize, return BMK_benchMemAdvanced(srcBuffer, benchedSize,
NULL, 0, NULL, 0,
@ -814,7 +814,7 @@ BMK_benchOutcome_t BMK_benchFilesAdvanced(
if (dictBuffer==NULL) { if (dictBuffer==NULL) {
free(fileSizes); free(fileSizes);
RETURN_ERROR(11, BMK_benchOutcome_t, "not enough memory for dictionary (%u bytes)", RETURN_ERROR(11, BMK_benchOutcome_t, "not enough memory for dictionary (%u bytes)",
(U32)dictBufferSize); (unsigned)dictBufferSize);
} }
{ int const errorCode = BMK_loadFiles(dictBuffer, dictBufferSize, { int const errorCode = BMK_loadFiles(dictBuffer, dictBufferSize,
@ -830,7 +830,7 @@ BMK_benchOutcome_t BMK_benchFilesAdvanced(
benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3; benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3;
if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad; if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
if (benchedSize < totalSizeToLoad) if (benchedSize < totalSizeToLoad)
DISPLAY("Not enough memory; testing %u MB only...\n", (U32)(benchedSize >> 20)); DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
srcBuffer = benchedSize ? malloc(benchedSize) : NULL; srcBuffer = benchedSize ? malloc(benchedSize) : NULL;
if (!srcBuffer) { if (!srcBuffer) {


@ -81,18 +81,18 @@ static BYTE RDG_genChar(U32* seed, const BYTE* ldt)
} }
static U32 RDG_rand15Bits (unsigned* seedPtr) static U32 RDG_rand15Bits (U32* seedPtr)
{ {
return RDG_rand(seedPtr) & 0x7FFF; return RDG_rand(seedPtr) & 0x7FFF;
} }
static U32 RDG_randLength(unsigned* seedPtr) static U32 RDG_randLength(U32* seedPtr)
{ {
if (RDG_rand(seedPtr) & 7) return (RDG_rand(seedPtr) & 0xF); /* small length */ if (RDG_rand(seedPtr) & 7) return (RDG_rand(seedPtr) & 0xF); /* small length */
return (RDG_rand(seedPtr) & 0x1FF) + 0xF; return (RDG_rand(seedPtr) & 0x1FF) + 0xF;
} }
static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, const BYTE* ldt, unsigned* seedPtr) static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, const BYTE* ldt, U32* seedPtr)
{ {
BYTE* const buffPtr = (BYTE*)buffer; BYTE* const buffPtr = (BYTE*)buffer;
U32 const matchProba32 = (U32)(32768 * matchProba); U32 const matchProba32 = (U32)(32768 * matchProba);
@ -141,16 +141,18 @@ static void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, doubl
void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed) void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed)
{ {
U32 seed32 = seed;
BYTE ldt[LTSIZE]; BYTE ldt[LTSIZE];
memset(ldt, '0', sizeof(ldt)); /* yes, character '0', this is intentional */ memset(ldt, '0', sizeof(ldt)); /* yes, character '0', this is intentional */
if (litProba<=0.0) litProba = matchProba / 4.5; if (litProba<=0.0) litProba = matchProba / 4.5;
RDG_fillLiteralDistrib(ldt, litProba); RDG_fillLiteralDistrib(ldt, litProba);
RDG_genBlock(buffer, size, 0, matchProba, ldt, &seed); RDG_genBlock(buffer, size, 0, matchProba, ldt, &seed32);
} }
void RDG_genStdout(unsigned long long size, double matchProba, double litProba, unsigned seed) void RDG_genStdout(unsigned long long size, double matchProba, double litProba, unsigned seed)
{ {
U32 seed32 = seed;
size_t const stdBlockSize = 128 KB; size_t const stdBlockSize = 128 KB;
size_t const stdDictSize = 32 KB; size_t const stdDictSize = 32 KB;
BYTE* const buff = (BYTE*)malloc(stdDictSize + stdBlockSize); BYTE* const buff = (BYTE*)malloc(stdDictSize + stdBlockSize);
@ -165,12 +167,12 @@ void RDG_genStdout(unsigned long long size, double matchProba, double litProba,
SET_BINARY_MODE(stdout); SET_BINARY_MODE(stdout);
/* Generate initial dict */ /* Generate initial dict */
RDG_genBlock(buff, stdDictSize, 0, matchProba, ldt, &seed); RDG_genBlock(buff, stdDictSize, 0, matchProba, ldt, &seed32);
/* Generate compressible data */ /* Generate compressible data */
while (total < size) { while (total < size) {
size_t const genBlockSize = (size_t) (MIN (stdBlockSize, size-total)); size_t const genBlockSize = (size_t) (MIN (stdBlockSize, size-total));
RDG_genBlock(buff, stdDictSize+stdBlockSize, stdDictSize, matchProba, ldt, &seed); RDG_genBlock(buff, stdDictSize+stdBlockSize, stdDictSize, matchProba, ldt, &seed32);
total += genBlockSize; total += genBlockSize;
{ size_t const unused = fwrite(buff, 1, genBlockSize, stdout); (void)unused; } { size_t const unused = fwrite(buff, 1, genBlockSize, stdout); (void)unused; }
/* update dict */ /* update dict */
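
datagen goes in the opposite direction of most other files: the public RDG_* entry points keep their `unsigned seed` parameter, while the internal generator now works on U32* state, so a local U32 copy (`seed32`) bridges the two instead of passing `&seed` across the type boundary. A reduced sketch of that shape (hypothetical names, not the real RDG code):

/* Hypothetical sketch of the seed-bridging pattern : internal PRNG state
 * stays an exact-width U32, the public API keeps `unsigned`, and a local
 * copy connects the two. */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t U32;

static U32 prng_next(U32* seedPtr)           /* internal : U32 state */
{
    *seedPtr = *seedPtr * 2654435761U + 2246822519U;
    return *seedPtr >> 5;
}

void gen_buffer(void* buffer, size_t size, unsigned seed)   /* public API */
{
    U32 seed32 = seed;                       /* bridge, instead of (U32*)&seed */
    unsigned char* p = (unsigned char*)buffer;
    size_t n;
    for (n = 0; n < size; n++)
        p[n] = (unsigned char)prng_next(&seed32);
}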


@ -139,7 +139,7 @@ static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr,
} }
DISPLAYLEVEL(2, "\r%79s\r", ""); DISPLAYLEVEL(2, "\r%79s\r", "");
*bufferSizePtr = pos; *bufferSizePtr = pos;
DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10)) DISPLAYLEVEL(4, "loaded : %u KB \n", (unsigned)(pos >> 10))
return nbLoadedChunks; return nbLoadedChunks;
} }
@ -249,7 +249,7 @@ static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, si
fs.oneSampleTooLarge |= (chunkSize > 2*SAMPLESIZE_MAX); fs.oneSampleTooLarge |= (chunkSize > 2*SAMPLESIZE_MAX);
fs.nbSamples += nbSamples; fs.nbSamples += nbSamples;
} }
DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10)); DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (unsigned)(fs.totalSizeToLoad >> 10));
return fs; return fs;
} }
@ -358,7 +358,7 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize,
goto _cleanup; goto _cleanup;
} }
/* save dict */ /* save dict */
DISPLAYLEVEL(2, "Save dictionary of size %u into file %s \n", (U32)dictSize, dictFileName); DISPLAYLEVEL(2, "Save dictionary of size %u into file %s \n", (unsigned)dictSize, dictFileName);
DiB_saveDict(dictFileName, dictBuffer, dictSize); DiB_saveDict(dictFileName, dictBuffer, dictSize);
} }


@ -279,9 +279,9 @@ static U32 g_checksumFlag = 1;
void FIO_setChecksumFlag(unsigned checksumFlag) { g_checksumFlag = checksumFlag; } void FIO_setChecksumFlag(unsigned checksumFlag) { g_checksumFlag = checksumFlag; }
static U32 g_removeSrcFile = 0; static U32 g_removeSrcFile = 0;
void FIO_setRemoveSrcFile(unsigned flag) { g_removeSrcFile = (flag>0); } void FIO_setRemoveSrcFile(unsigned flag) { g_removeSrcFile = (flag>0); }
static U32 g_memLimit = 0; static unsigned g_memLimit = 0;
void FIO_setMemLimit(unsigned memLimit) { g_memLimit = memLimit; } void FIO_setMemLimit(unsigned memLimit) { g_memLimit = memLimit; }
static U32 g_nbWorkers = 1; static unsigned g_nbWorkers = 1;
void FIO_setNbWorkers(unsigned nbWorkers) { void FIO_setNbWorkers(unsigned nbWorkers) {
#ifndef ZSTD_MULTITHREAD #ifndef ZSTD_MULTITHREAD
if (nbWorkers > 0) DISPLAYLEVEL(2, "Note : multi-threading is disabled \n"); if (nbWorkers > 0) DISPLAYLEVEL(2, "Note : multi-threading is disabled \n");
@ -295,7 +295,7 @@ void FIO_setBlockSize(unsigned blockSize) {
g_blockSize = blockSize; g_blockSize = blockSize;
} }
#define FIO_OVERLAP_LOG_NOTSET 9999 #define FIO_OVERLAP_LOG_NOTSET 9999
static U32 g_overlapLog = FIO_OVERLAP_LOG_NOTSET; static unsigned g_overlapLog = FIO_OVERLAP_LOG_NOTSET;
void FIO_setOverlapLog(unsigned overlapLog){ void FIO_setOverlapLog(unsigned overlapLog){
if (overlapLog && g_nbWorkers==0) if (overlapLog && g_nbWorkers==0)
DISPLAYLEVEL(2, "Setting overlapLog is useless in single-thread mode \n"); DISPLAYLEVEL(2, "Setting overlapLog is useless in single-thread mode \n");
@ -656,11 +656,11 @@ FIO_compressGzFrame(cRess_t* ress,
} }
if (srcFileSize == UTIL_FILESIZE_UNKNOWN) if (srcFileSize == UTIL_FILESIZE_UNKNOWN)
DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%",
(U32)(inFileSize>>20), (unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100) (double)outFileSize/inFileSize*100)
else else
DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%",
(U32)(inFileSize>>20), (U32)(srcFileSize>>20), (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100); (double)outFileSize/inFileSize*100);
} }
@ -744,11 +744,11 @@ FIO_compressLzmaFrame(cRess_t* ress,
} } } }
if (srcFileSize == UTIL_FILESIZE_UNKNOWN) if (srcFileSize == UTIL_FILESIZE_UNKNOWN)
DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%",
(U32)(inFileSize>>20), (unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100) (double)outFileSize/inFileSize*100)
else else
DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%",
(U32)(inFileSize>>20), (U32)(srcFileSize>>20), (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100); (double)outFileSize/inFileSize*100);
if (ret == LZMA_STREAM_END) break; if (ret == LZMA_STREAM_END) break;
} }
@ -820,11 +820,11 @@ FIO_compressLz4Frame(cRess_t* ress,
outFileSize += outSize; outFileSize += outSize;
if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { if (srcFileSize == UTIL_FILESIZE_UNKNOWN) {
DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%",
(U32)(inFileSize>>20), (unsigned)(inFileSize>>20),
(double)outFileSize/inFileSize*100) (double)outFileSize/inFileSize*100)
} else { } else {
DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%",
(U32)(inFileSize>>20), (U32)(srcFileSize>>20), (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20),
(double)outFileSize/inFileSize*100); (double)outFileSize/inFileSize*100);
} }
@ -897,7 +897,7 @@ FIO_compressZstdFrame(const cRess_t* ressPtr,
/* Fill input Buffer */ /* Fill input Buffer */
size_t const inSize = fread(ress.srcBuffer, (size_t)1, ress.srcBufferSize, srcFile); size_t const inSize = fread(ress.srcBuffer, (size_t)1, ress.srcBufferSize, srcFile);
ZSTD_inBuffer inBuff = { ress.srcBuffer, inSize, 0 }; ZSTD_inBuffer inBuff = { ress.srcBuffer, inSize, 0 };
DISPLAYLEVEL(6, "fread %u bytes from source \n", (U32)inSize); DISPLAYLEVEL(6, "fread %u bytes from source \n", (unsigned)inSize);
*readsize += inSize; *readsize += inSize;
if ((inSize == 0) || (*readsize == fileSize)) if ((inSize == 0) || (*readsize == fileSize))
@ -919,7 +919,7 @@ FIO_compressZstdFrame(const cRess_t* ressPtr,
/* Write compressed stream */ /* Write compressed stream */
DISPLAYLEVEL(6, "ZSTD_compress_generic(end:%u) => input pos(%u)<=(%u)size ; output generated %u bytes \n", DISPLAYLEVEL(6, "ZSTD_compress_generic(end:%u) => input pos(%u)<=(%u)size ; output generated %u bytes \n",
(U32)directive, (U32)inBuff.pos, (U32)inBuff.size, (U32)outBuff.pos); (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos);
if (outBuff.pos) { if (outBuff.pos) {
size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile); size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile);
if (sizeCheck != outBuff.pos) if (sizeCheck != outBuff.pos)
@ -937,14 +937,14 @@ FIO_compressZstdFrame(const cRess_t* ressPtr,
if (g_displayLevel >= 3) { if (g_displayLevel >= 3) {
DISPLAYUPDATE(3, "\r(L%i) Buffered :%4u MB - Consumed :%4u MB - Compressed :%4u MB => %.2f%% ", DISPLAYUPDATE(3, "\r(L%i) Buffered :%4u MB - Consumed :%4u MB - Compressed :%4u MB => %.2f%% ",
compressionLevel, compressionLevel,
(U32)((zfp.ingested - zfp.consumed) >> 20), (unsigned)((zfp.ingested - zfp.consumed) >> 20),
(U32)(zfp.consumed >> 20), (unsigned)(zfp.consumed >> 20),
(U32)(zfp.produced >> 20), (unsigned)(zfp.produced >> 20),
cShare ); cShare );
} else { /* summarized notifications if == 2; */ } else { /* summarized notifications if == 2; */
DISPLAYLEVEL(2, "\rRead : %u ", (U32)(zfp.consumed >> 20)); DISPLAYLEVEL(2, "\rRead : %u ", (unsigned)(zfp.consumed >> 20));
if (fileSize != UTIL_FILESIZE_UNKNOWN) if (fileSize != UTIL_FILESIZE_UNKNOWN)
DISPLAYLEVEL(2, "/ %u ", (U32)(fileSize >> 20)); DISPLAYLEVEL(2, "/ %u ", (unsigned)(fileSize >> 20));
DISPLAYLEVEL(2, "MB ==> %2.f%% ", cShare); DISPLAYLEVEL(2, "MB ==> %2.f%% ", cShare);
DELAY_NEXT_UPDATE(); DELAY_NEXT_UPDATE();
} }
@ -1000,8 +1000,8 @@ FIO_compressZstdFrame(const cRess_t* ressPtr,
assert(inputPresented > 0); assert(inputPresented > 0);
DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n",
inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100,
(U32)newlyIngested, (U32)newlyConsumed, (unsigned)newlyIngested, (unsigned)newlyConsumed,
(U32)newlyFlushed, (U32)newlyProduced); (unsigned)newlyFlushed, (unsigned)newlyProduced);
if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full : compression or output too slow */ if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full : compression or output too slow */
&& (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */
&& (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */
@ -1063,7 +1063,7 @@ FIO_compressFilename_internal(cRess_t ress,
U64 readsize = 0; U64 readsize = 0;
U64 compressedfilesize = 0; U64 compressedfilesize = 0;
U64 const fileSize = UTIL_getFileSize(srcFileName); U64 const fileSize = UTIL_getFileSize(srcFileName);
DISPLAYLEVEL(5, "%s: %u bytes \n", srcFileName, (U32)fileSize); DISPLAYLEVEL(5, "%s: %u bytes \n", srcFileName, (unsigned)fileSize);
/* compression format selection */ /* compression format selection */
switch (g_compressionType) { switch (g_compressionType) {
@ -1505,12 +1505,12 @@ static void FIO_zstdErrorHelp(dRess_t* ress, size_t err, char const* srcFileName
err = ZSTD_getFrameHeader(&header, ress->srcBuffer, ress->srcBufferLoaded); err = ZSTD_getFrameHeader(&header, ress->srcBuffer, ress->srcBufferLoaded);
if (err == 0) { if (err == 0) {
unsigned long long const windowSize = header.windowSize; unsigned long long const windowSize = header.windowSize;
U32 const windowLog = FIO_highbit64(windowSize) + ((windowSize & (windowSize - 1)) != 0); unsigned const windowLog = FIO_highbit64(windowSize) + ((windowSize & (windowSize - 1)) != 0);
assert(g_memLimit > 0); assert(g_memLimit > 0);
DISPLAYLEVEL(1, "%s : Window size larger than maximum : %llu > %u\n", DISPLAYLEVEL(1, "%s : Window size larger than maximum : %llu > %u\n",
srcFileName, windowSize, g_memLimit); srcFileName, windowSize, g_memLimit);
if (windowLog <= ZSTD_WINDOWLOG_MAX) { if (windowLog <= ZSTD_WINDOWLOG_MAX) {
U32 const windowMB = (U32)((windowSize >> 20) + ((windowSize & ((1 MB) - 1)) != 0)); unsigned const windowMB = (unsigned)((windowSize >> 20) + ((windowSize & ((1 MB) - 1)) != 0));
assert(windowSize < (U64)(1ULL << 52)); /* ensure now overflow for windowMB */ assert(windowSize < (U64)(1ULL << 52)); /* ensure now overflow for windowMB */
DISPLAYLEVEL(1, "%s : Use --long=%u or --memory=%uMB\n", DISPLAYLEVEL(1, "%s : Use --long=%u or --memory=%uMB\n",
srcFileName, windowLog, windowMB); srcFileName, windowLog, windowMB);
@ -1562,7 +1562,7 @@ static unsigned long long FIO_decompressZstdFrame(dRess_t* ress,
storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, storedSkips); storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, storedSkips);
frameSize += outBuff.pos; frameSize += outBuff.pos;
DISPLAYUPDATE(2, "\r%-20.20s : %u MB... ", DISPLAYUPDATE(2, "\r%-20.20s : %u MB... ",
srcFileName, (U32)((alreadyDecoded+frameSize)>>20) ); srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) );
if (inBuff.pos > 0) { if (inBuff.pos > 0) {
memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos); memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos);
@ -2382,13 +2382,13 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis
total.numSkippableFrames + total.numActualFrames, total.numSkippableFrames + total.numActualFrames,
total.numSkippableFrames, total.numSkippableFrames,
compressedSizeUnit, unitStr, compressedSizeUnit, unitStr,
checkString, total.nbFiles); checkString, (unsigned)total.nbFiles);
} else { } else {
DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %u files\n", DISPLAYOUT("%6d %5d %7.2f %2s %9.2f %2s %5.3f %5s %u files\n",
total.numSkippableFrames + total.numActualFrames, total.numSkippableFrames + total.numActualFrames,
total.numSkippableFrames, total.numSkippableFrames,
compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr, compressedSizeUnit, unitStr, decompressedSizeUnit, unitStr,
ratio, checkString, total.nbFiles); ratio, checkString, (unsigned)total.nbFiles);
} } } }
return error; return error;
} }


@ -121,7 +121,7 @@ int main(int argc, const char** argv)
DISPLAYLEVEL(4, "Compressible data Generator \n"); DISPLAYLEVEL(4, "Compressible data Generator \n");
if (probaU32!=COMPRESSIBILITY_DEFAULT) if (probaU32!=COMPRESSIBILITY_DEFAULT)
DISPLAYLEVEL(3, "Compressibility : %i%%\n", probaU32); DISPLAYLEVEL(3, "Compressibility : %i%%\n", probaU32);
DISPLAYLEVEL(3, "Seed = %u \n", seed); DISPLAYLEVEL(3, "Seed = %u \n", (unsigned)seed);
RDG_genStdout(size, (double)probaU32/100, litProba, seed); RDG_genStdout(size, (double)probaU32/100, litProba, seed);
DISPLAYLEVEL(1, "\n"); DISPLAYLEVEL(1, "\n");


@ -22,7 +22,7 @@
#define ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY
#include "zdict.h" #include "zdict.h"
// Direct access to internal compression functions is required /* Direct access to internal compression functions is required */
#include "zstd_compress.c" #include "zstd_compress.c"
#define XXH_STATIC_LINKING_ONLY #define XXH_STATIC_LINKING_ONLY
@ -72,7 +72,7 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER;
/*-******************************************************* /*-*******************************************************
* Random function * Random function
*********************************************************/ *********************************************************/
static unsigned RAND(unsigned* src) static U32 RAND(U32* src)
{ {
#define RAND_rotl32(x,r) ((x << r) | (x >> (32 - r))) #define RAND_rotl32(x,r) ((x << r) | (x >> (32 - r)))
static const U32 prime1 = 2654435761U; static const U32 prime1 = 2654435761U;
@ -350,7 +350,7 @@ static void writeFrameHeader(U32* seed, frame_t* frame, dictInfo info)
} }
} }
DISPLAYLEVEL(3, " frame content size:\t%u\n", (U32)fh.contentSize); DISPLAYLEVEL(3, " frame content size:\t%u\n", (unsigned)fh.contentSize);
DISPLAYLEVEL(3, " frame window size:\t%u\n", fh.windowSize); DISPLAYLEVEL(3, " frame window size:\t%u\n", fh.windowSize);
DISPLAYLEVEL(3, " content size flag:\t%d\n", contentSizeFlag); DISPLAYLEVEL(3, " content size flag:\t%d\n", contentSizeFlag);
DISPLAYLEVEL(3, " single segment flag:\t%d\n", singleSegment); DISPLAYLEVEL(3, " single segment flag:\t%d\n", singleSegment);
@ -412,7 +412,7 @@ static size_t writeLiteralsBlockSimple(U32* seed, frame_t* frame, size_t content
/* RLE literals */ /* RLE literals */
BYTE const symb = (BYTE) (RAND(seed) % 256); BYTE const symb = (BYTE) (RAND(seed) % 256);
DISPLAYLEVEL(4, " rle literals: 0x%02x\n", (U32)symb); DISPLAYLEVEL(4, " rle literals: 0x%02x\n", (unsigned)symb);
memset(LITERAL_BUFFER, symb, litSize); memset(LITERAL_BUFFER, symb, litSize);
op[0] = symb; op[0] = symb;
@ -432,9 +432,9 @@ static size_t writeHufHeader(U32* seed, HUF_CElt* hufTable, void* dst, size_t ds
BYTE* op = ostart; BYTE* op = ostart;
unsigned huffLog = 11; unsigned huffLog = 11;
U32 maxSymbolValue = 255; unsigned maxSymbolValue = 255;
U32 count[HUF_SYMBOLVALUE_MAX+1]; unsigned count[HUF_SYMBOLVALUE_MAX+1];
/* Scan input and build symbol stats */ /* Scan input and build symbol stats */
{ size_t const largest = HIST_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, WKSP, sizeof(WKSP)); { size_t const largest = HIST_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, WKSP, sizeof(WKSP));
@ -568,8 +568,8 @@ static size_t writeLiteralsBlockCompressed(U32* seed, frame_t* frame, size_t con
op += compressedSize; op += compressedSize;
compressedSize += hufHeaderSize; compressedSize += hufHeaderSize;
DISPLAYLEVEL(5, " regenerated size: %u\n", (U32)litSize); DISPLAYLEVEL(5, " regenerated size: %u\n", (unsigned)litSize);
DISPLAYLEVEL(5, " compressed size: %u\n", (U32)compressedSize); DISPLAYLEVEL(5, " compressed size: %u\n", (unsigned)compressedSize);
if (compressedSize >= litSize) { if (compressedSize >= litSize) {
DISPLAYLEVEL(5, " trying again\n"); DISPLAYLEVEL(5, " trying again\n");
/* if we have to try again, reset the stats so we don't accidentally /* if we have to try again, reset the stats so we don't accidentally
@ -656,7 +656,7 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
excessMatch = remainingMatch - numSequences * MIN_SEQ_LEN; excessMatch = remainingMatch - numSequences * MIN_SEQ_LEN;
} }
DISPLAYLEVEL(5, " total match lengths: %u\n", (U32)remainingMatch); DISPLAYLEVEL(5, " total match lengths: %u\n", (unsigned)remainingMatch);
for (i = 0; i < numSequences; i++) { for (i = 0; i < numSequences; i++) {
/* Generate match and literal lengths by exponential distribution to /* Generate match and literal lengths by exponential distribution to
* ensure nice numbers */ * ensure nice numbers */
@ -748,12 +748,13 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
frame->stats.rep[0] = offset; frame->stats.rep[0] = offset;
} }
DISPLAYLEVEL(6, " LL: %5u OF: %5u ML: %5u", literalLen, offset, matchLen); DISPLAYLEVEL(6, " LL: %5u OF: %5u ML: %5u",
(unsigned)literalLen, (unsigned)offset, (unsigned)matchLen);
DISPLAYLEVEL(7, " srcPos: %8u seqNb: %3u", DISPLAYLEVEL(7, " srcPos: %8u seqNb: %3u",
(U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart), i); (unsigned)((BYTE*)srcPtr - (BYTE*)frame->srcStart), (unsigned)i);
DISPLAYLEVEL(6, "\n"); DISPLAYLEVEL(6, "\n");
if (offsetCode < 3) { if (offsetCode < 3) {
DISPLAYLEVEL(7, " repeat offset: %d\n", repIndex); DISPLAYLEVEL(7, " repeat offset: %d\n", (int)repIndex);
} }
/* use libzstd sequence handling */ /* use libzstd sequence handling */
ZSTD_storeSeq(seqStore, literalLen, literals, offsetCode, ZSTD_storeSeq(seqStore, literalLen, literals, offsetCode,
@ -766,8 +767,8 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
memcpy(srcPtr, literals, literalsSize); memcpy(srcPtr, literals, literalsSize);
srcPtr += literalsSize; srcPtr += literalsSize;
DISPLAYLEVEL(6, " excess literals: %5u", (U32)literalsSize); DISPLAYLEVEL(6, " excess literals: %5u", (unsigned)literalsSize);
DISPLAYLEVEL(7, " srcPos: %8u", (U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart)); DISPLAYLEVEL(7, " srcPos: %8u", (unsigned)((BYTE*)srcPtr - (BYTE*)frame->srcStart));
DISPLAYLEVEL(6, "\n"); DISPLAYLEVEL(6, "\n");
return numSequences; return numSequences;
@ -800,7 +801,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
size_t nbSeq) size_t nbSeq)
{ {
/* This code is mostly copied from ZSTD_compressSequences in zstd_compress.c */ /* This code is mostly copied from ZSTD_compressSequences in zstd_compress.c */
U32 count[MaxSeq+1]; unsigned count[MaxSeq+1];
S16 norm[MaxSeq+1]; S16 norm[MaxSeq+1];
FSE_CTable* CTable_LitLength = frame->stats.litlengthCTable; FSE_CTable* CTable_LitLength = frame->stats.litlengthCTable;
FSE_CTable* CTable_OffsetBits = frame->stats.offcodeCTable; FSE_CTable* CTable_OffsetBits = frame->stats.offcodeCTable;
@ -835,7 +836,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
ZSTD_seqToCodes(seqStorePtr); ZSTD_seqToCodes(seqStorePtr);
/* CTable for Literal Lengths */ /* CTable for Literal Lengths */
{ U32 max = MaxLL; { unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */ size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */
assert(!HIST_isError(mostFrequent)); assert(!HIST_isError(mostFrequent));
if (mostFrequent == nbSeq) { if (mostFrequent == nbSeq) {
@ -867,7 +868,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
/* CTable for Offsets */ /* CTable for Offsets */
/* see Literal Lengths for descriptions of mode choices */ /* see Literal Lengths for descriptions of mode choices */
{ U32 max = MaxOff; { unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */ size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */
assert(!HIST_isError(mostFrequent)); assert(!HIST_isError(mostFrequent));
if (mostFrequent == nbSeq) { if (mostFrequent == nbSeq) {
@ -895,7 +896,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
/* CTable for MatchLengths */ /* CTable for MatchLengths */
/* see Literal Lengths for descriptions of mode choices */ /* see Literal Lengths for descriptions of mode choices */
{ U32 max = MaxML; { unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */ size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, WKSP, sizeof(WKSP)); /* cannot fail */
assert(!HIST_isError(mostFrequent)); assert(!HIST_isError(mostFrequent));
if (mostFrequent == nbSeq) { if (mostFrequent == nbSeq) {
@ -927,7 +928,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
initSymbolSet(ofCodeTable, nbSeq, frame->stats.offsetSymbolSet, 28); initSymbolSet(ofCodeTable, nbSeq, frame->stats.offsetSymbolSet, 28);
initSymbolSet(mlCodeTable, nbSeq, frame->stats.matchlengthSymbolSet, 52); initSymbolSet(mlCodeTable, nbSeq, frame->stats.matchlengthSymbolSet, 52);
DISPLAYLEVEL(5, " LL type: %d OF type: %d ML type: %d\n", LLtype, Offtype, MLtype); DISPLAYLEVEL(5, " LL type: %d OF type: %d ML type: %d\n", (unsigned)LLtype, (unsigned)Offtype, (unsigned)MLtype);
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
@ -1014,11 +1015,11 @@ static size_t writeCompressedBlock(U32* seed, frame_t* frame, size_t contentSize
literalsSize = writeLiteralsBlock(seed, frame, contentSize); literalsSize = writeLiteralsBlock(seed, frame, contentSize);
DISPLAYLEVEL(4, " literals size: %u\n", (U32)literalsSize); DISPLAYLEVEL(4, " literals size: %u\n", (unsigned)literalsSize);
nbSeq = writeSequencesBlock(seed, frame, contentSize, literalsSize, info); nbSeq = writeSequencesBlock(seed, frame, contentSize, literalsSize, info);
DISPLAYLEVEL(4, " number of sequences: %u\n", (U32)nbSeq); DISPLAYLEVEL(4, " number of sequences: %u\n", (unsigned)nbSeq);
return (BYTE*)frame->data - blockStart; return (BYTE*)frame->data - blockStart;
} }
@ -1034,7 +1035,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
BYTE *op = header + 3; BYTE *op = header + 3;
DISPLAYLEVEL(4, " block:\n"); DISPLAYLEVEL(4, " block:\n");
DISPLAYLEVEL(4, " block content size: %u\n", (U32)contentSize); DISPLAYLEVEL(4, " block content size: %u\n", (unsigned)contentSize);
DISPLAYLEVEL(4, " last block: %s\n", lastBlock ? "yes" : "no"); DISPLAYLEVEL(4, " last block: %s\n", lastBlock ? "yes" : "no");
if (blockTypeDesc == 0) { if (blockTypeDesc == 0) {
@ -1082,7 +1083,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
frame->src = (BYTE*)frame->src + contentSize; frame->src = (BYTE*)frame->src + contentSize;
DISPLAYLEVEL(4, " block type: %s\n", BLOCK_TYPES[blockType]); DISPLAYLEVEL(4, " block type: %s\n", BLOCK_TYPES[blockType]);
DISPLAYLEVEL(4, " block size field: %u\n", (U32)blockSize); DISPLAYLEVEL(4, " block size field: %u\n", (unsigned)blockSize);
header[0] = (BYTE) ((lastBlock | (blockType << 1) | (blockSize << 3)) & 0xff); header[0] = (BYTE) ((lastBlock | (blockType << 1) | (blockSize << 3)) & 0xff);
MEM_writeLE16(header + 1, (U16) (blockSize >> 5)); MEM_writeLE16(header + 1, (U16) (blockSize >> 5));
@ -1124,7 +1125,7 @@ static void writeChecksum(frame_t* frame)
{ {
/* write checksum so implementations can verify their output */ /* write checksum so implementations can verify their output */
U64 digest = XXH64(frame->srcStart, (BYTE*)frame->src-(BYTE*)frame->srcStart, 0); U64 digest = XXH64(frame->srcStart, (BYTE*)frame->src-(BYTE*)frame->srcStart, 0);
DISPLAYLEVEL(3, " checksum: %08x\n", (U32)digest); DISPLAYLEVEL(3, " checksum: %08x\n", (unsigned)digest);
MEM_writeLE32(frame->data, (U32)digest); MEM_writeLE32(frame->data, (U32)digest);
frame->data = (BYTE*)frame->data + 4; frame->data = (BYTE*)frame->data + 4;
} }
@ -1185,7 +1186,7 @@ static U32 generateCompressedBlock(U32 seed, frame_t* frame, dictInfo info)
size_t blockContentSize; size_t blockContentSize;
int blockWritten = 0; int blockWritten = 0;
BYTE* op; BYTE* op;
DISPLAYLEVEL(4, "block seed: %u\n", seed); DISPLAYLEVEL(4, "block seed: %u\n", (unsigned)seed);
initFrame(frame); initFrame(frame);
op = (BYTE*)frame->data; op = (BYTE*)frame->data;
@ -1222,7 +1223,7 @@ static U32 generateCompressedBlock(U32 seed, frame_t* frame, dictInfo info)
DISPLAYLEVEL(5, " can't compress block : try again \n"); DISPLAYLEVEL(5, " can't compress block : try again \n");
} else { } else {
blockWritten = 1; blockWritten = 1;
DISPLAYLEVEL(4, " block size: %u \n", (U32)cSize); DISPLAYLEVEL(4, " block size: %u \n", (unsigned)cSize);
frame->src = (BYTE*)frame->src + blockContentSize; frame->src = (BYTE*)frame->src + blockContentSize;
} }
} }
@ -1233,7 +1234,7 @@ static U32 generateCompressedBlock(U32 seed, frame_t* frame, dictInfo info)
static U32 generateFrame(U32 seed, frame_t* fr, dictInfo info) static U32 generateFrame(U32 seed, frame_t* fr, dictInfo info)
{ {
/* generate a complete frame */ /* generate a complete frame */
DISPLAYLEVEL(3, "frame seed: %u\n", seed); DISPLAYLEVEL(3, "frame seed: %u\n", (unsigned)seed);
initFrame(fr); initFrame(fr);
writeFrameHeader(&seed, fr, info); writeFrameHeader(&seed, fr, info);
@ -1479,8 +1480,8 @@ static int runBlockTest(U32* seed)
{ size_t const r = testDecodeRawBlock(&fr); { size_t const r = testDecodeRawBlock(&fr);
if (ZSTD_isError(r)) { if (ZSTD_isError(r)) {
DISPLAY("Error in block mode on test seed %u: %s\n", seedCopy, DISPLAY("Error in block mode on test seed %u: %s\n",
ZSTD_getErrorName(r)); (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1; return 1;
} }
} }
@ -1488,7 +1489,7 @@ static int runBlockTest(U32* seed)
{ size_t const r = testDecodeWithDict(*seed, gt_block); { size_t const r = testDecodeWithDict(*seed, gt_block);
if (ZSTD_isError(r)) { if (ZSTD_isError(r)) {
DISPLAY("Error in block mode with dictionary on test seed %u: %s\n", DISPLAY("Error in block mode with dictionary on test seed %u: %s\n",
seedCopy, ZSTD_getErrorName(r)); (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1; return 1;
} }
} }
@ -1506,21 +1507,21 @@ static int runFrameTest(U32* seed)
{ size_t const r = testDecodeSimple(&fr); { size_t const r = testDecodeSimple(&fr);
if (ZSTD_isError(r)) { if (ZSTD_isError(r)) {
DISPLAY("Error in simple mode on test seed %u: %s\n", DISPLAY("Error in simple mode on test seed %u: %s\n",
seedCopy, ZSTD_getErrorName(r)); (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1; return 1;
} }
} }
{ size_t const r = testDecodeStreaming(&fr); { size_t const r = testDecodeStreaming(&fr);
if (ZSTD_isError(r)) { if (ZSTD_isError(r)) {
DISPLAY("Error in streaming mode on test seed %u: %s\n", DISPLAY("Error in streaming mode on test seed %u: %s\n",
seedCopy, ZSTD_getErrorName(r)); (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1; return 1;
} }
} }
{ size_t const r = testDecodeWithDict(*seed, gt_frame); /* avoid big dictionaries */ { size_t const r = testDecodeWithDict(*seed, gt_frame); /* avoid big dictionaries */
if (ZSTD_isError(r)) { if (ZSTD_isError(r)) {
DISPLAY("Error in dictionary mode on test seed %u: %s\n", DISPLAY("Error in dictionary mode on test seed %u: %s\n",
seedCopy, ZSTD_getErrorName(r)); (unsigned)seedCopy, ZSTD_getErrorName(r));
return 1; return 1;
} }
} }
@ -1537,7 +1538,7 @@ static int runTestMode(U32 seed, unsigned numFiles, unsigned const testDurationS
if (numFiles == 0 && !testDurationS) numFiles = 1; if (numFiles == 0 && !testDurationS) numFiles = 1;
DISPLAY("seed: %u\n", seed); DISPLAY("seed: %u\n", (unsigned)seed);
for (fnum = 0; fnum < numFiles || UTIL_clockSpanMicro(startClock) < maxClockSpan; fnum++) { for (fnum = 0; fnum < numFiles || UTIL_clockSpanMicro(startClock) < maxClockSpan; fnum++) {
if (fnum < numFiles) if (fnum < numFiles)
@ -1567,7 +1568,7 @@ static int generateFile(U32 seed, const char* const path,
{ {
frame_t fr; frame_t fr;
DISPLAY("seed: %u\n", seed); DISPLAY("seed: %u\n", (unsigned)seed);
{ dictInfo const info = initDictInfo(0, 0, NULL, 0); { dictInfo const info = initDictInfo(0, 0, NULL, 0);
if (genType == gt_frame) { if (genType == gt_frame) {
@ -1589,7 +1590,7 @@ static int generateCorpus(U32 seed, unsigned numFiles, const char* const path,
char outPath[MAX_PATH]; char outPath[MAX_PATH];
unsigned fnum; unsigned fnum;
DISPLAY("seed: %u\n", seed); DISPLAY("seed: %u\n", (unsigned)seed);
for (fnum = 0; fnum < numFiles; fnum++) { for (fnum = 0; fnum < numFiles; fnum++) {
frame_t fr; frame_t fr;
View File
@ -64,10 +64,10 @@ static const size_t g_sampleSize = 10000000;
/*_************************************ /*_************************************
* Benchmark Parameters * Benchmark Parameters
**************************************/ **************************************/
static U32 g_nbIterations = NBLOOPS; static unsigned g_nbIterations = NBLOOPS;
static double g_compressibility = COMPRESSIBILITY_DEFAULT; static double g_compressibility = COMPRESSIBILITY_DEFAULT;
static void BMK_SetNbIterations(U32 nbLoops) static void BMK_SetNbIterations(unsigned nbLoops)
{ {
g_nbIterations = nbLoops; g_nbIterations = nbLoops;
DISPLAY("- %i iterations -\n", g_nbIterations); DISPLAY("- %i iterations -\n", g_nbIterations);
@ -316,7 +316,7 @@ static size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize,
/*_******************************************************* /*_*******************************************************
* Bench functions * Bench functions
*********************************************************/ *********************************************************/
static size_t benchMem(U32 benchNb, static size_t benchMem(unsigned benchNb,
const void* src, size_t srcSize, const void* src, size_t srcSize,
int cLevel, ZSTD_compressionParameters cparams) int cLevel, ZSTD_compressionParameters cparams)
{ {
@ -606,7 +606,7 @@ static int benchFiles(U32 benchNb,
benchedSize = (size_t)inFileSize; benchedSize = (size_t)inFileSize;
if ((U64)benchedSize < inFileSize) { if ((U64)benchedSize < inFileSize) {
DISPLAY("Not enough memory for '%s' full size; testing %u MB only... \n", DISPLAY("Not enough memory for '%s' full size; testing %u MB only... \n",
inFileName, (U32)(benchedSize>>20)); inFileName, (unsigned)(benchedSize>>20));
} } } }
/* Alloc */ /* Alloc */
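The benchFiles hunk above shows the companion pattern for sizes: a 64-bit file size is narrowed to size_t, the narrowing is detected by comparing back against the original, and the reduced size is reported in MB through an (unsigned)(... >> 20) cast. A simplified, hypothetical sketch of that check follows; usableSize() is not fullbench.c code, and the real function does more than this.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t U64;

    /* Detect that converting a 64-bit size to size_t lost information
     * (possible on 32-bit builds) and report the size actually benchmarked. */
    static size_t usableSize(U64 inFileSize, const char* inFileName)
    {
        size_t const benchedSize = (size_t)inFileSize;   /* may drop high bits on 32-bit targets */
        if ((U64)benchedSize < inFileSize) {
            printf("Not enough address space for '%s' full size; testing %u MB only... \n",
                   inFileName, (unsigned)(benchedSize >> 20));
        }
        return benchedSize;
    }

    int main(void)
    {
        printf("benchmarking %u MB\n", (unsigned)(usableSize(5ULL << 30, "big.bin") >> 20));
        return 0;
    }

On a 64-bit build the warning never fires; on a 32-bit build the 5 GB input wraps and only the wrapped portion is benchmarked, which is what the message announces.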
View File
@ -47,8 +47,8 @@
#define MB *(1U<<20) #define MB *(1U<<20)
#define GB *(1U<<30) #define GB *(1U<<30)
static const U32 FUZ_compressibility_default = 50; static const int FUZ_compressibility_default = 50;
static const U32 nbTestsDefault = 30000; static const int nbTestsDefault = 30000;
/*-************************************ /*-************************************
@ -88,7 +88,7 @@ void FUZ_bug976(void)
#define MAX(a,b) ((a)>(b)?(a):(b)) #define MAX(a,b) ((a)>(b)?(a):(b))
#define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r))) #define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
static unsigned FUZ_rand(unsigned* src) static U32 FUZ_rand(U32* src)
{ {
static const U32 prime1 = 2654435761U; static const U32 prime1 = 2654435761U;
static const U32 prime2 = 2246822519U; static const U32 prime2 = 2246822519U;
@ -100,7 +100,7 @@ static unsigned FUZ_rand(unsigned* src)
return rand32 >> 5; return rand32 >> 5;
} }
static unsigned FUZ_highbit32(U32 v32) static U32 FUZ_highbit32(U32 v32)
{ {
unsigned nbBits = 0; unsigned nbBits = 0;
if (v32==0) return 0; if (v32==0) return 0;
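Note that the fuzzer's own generator moves in the opposite direction, from unsigned back to U32, in line with the commit message's remark about internal seeds: the state presumably has to wrap at exactly 32 bits so that a given seed replays the same pseudo-random sequence on every platform. A sketch of such a step, reusing the two primes, the rotate macro and the final >> 5 visible above; the exact update order and rotation amount in fuzzer.c may differ.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t U32;

    #define ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))

    /* FUZ_rand-style step: all arithmetic wraps modulo 2^32 because the state
     * type is exactly 32 bits wide, so the sequence is platform-independent. */
    static U32 rand_step(U32* state)
    {
        static const U32 prime1 = 2654435761U;
        static const U32 prime2 = 2246822519U;
        U32 rand32 = *state;
        rand32 *= prime1;
        rand32 += prime2;
        rand32  = ROTL32(rand32, 13);
        *state = rand32;
        return rand32 >> 5;      /* same post-shift as FUZ_rand() */
    }

    int main(void)
    {
        U32 seed = 12345;
        unsigned const d1 = (unsigned)rand_step(&seed);
        unsigned const d2 = (unsigned)rand_step(&seed);
        unsigned const d3 = (unsigned)rand_step(&seed);
        printf("first draws: %u %u %u\n", d1, d2, d3);
        return 0;
    }

With a wider state type the multiply would wrap modulo a larger power of two and the draws would drift away from the 32-bit sequence, so U32 is the right type here even though it makes the %u call sites need a cast.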
@ -155,7 +155,7 @@ static void* FUZ_mallocDebug(void* counter, size_t size)
void* const ptr = malloc(size); void* const ptr = malloc(size);
if (ptr==NULL) return NULL; if (ptr==NULL) return NULL;
DISPLAYLEVEL(4, "allocating %u KB => effectively %u KB \n", DISPLAYLEVEL(4, "allocating %u KB => effectively %u KB \n",
(U32)(size >> 10), (U32)(malloc_size(ptr) >> 10)); /* OS-X specific */ (unsigned)(size >> 10), (unsigned)(malloc_size(ptr) >> 10)); /* OS-X specific */
mcPtr->totalMalloc += size; mcPtr->totalMalloc += size;
mcPtr->currentMalloc += size; mcPtr->currentMalloc += size;
if (mcPtr->currentMalloc > mcPtr->peakMalloc) if (mcPtr->currentMalloc > mcPtr->peakMalloc)
@ -167,7 +167,7 @@ static void* FUZ_mallocDebug(void* counter, size_t size)
static void FUZ_freeDebug(void* counter, void* address) static void FUZ_freeDebug(void* counter, void* address)
{ {
mallocCounter_t* const mcPtr = (mallocCounter_t*)counter; mallocCounter_t* const mcPtr = (mallocCounter_t*)counter;
DISPLAYLEVEL(4, "freeing %u KB \n", (U32)(malloc_size(address) >> 10)); DISPLAYLEVEL(4, "freeing %u KB \n", (unsigned)(malloc_size(address) >> 10));
mcPtr->nbFree += 1; mcPtr->nbFree += 1;
mcPtr->currentMalloc -= malloc_size(address); /* OS-X specific */ mcPtr->currentMalloc -= malloc_size(address); /* OS-X specific */
free(address); free(address);
@ -176,9 +176,9 @@ static void FUZ_freeDebug(void* counter, void* address)
static void FUZ_displayMallocStats(mallocCounter_t count) static void FUZ_displayMallocStats(mallocCounter_t count)
{ {
DISPLAYLEVEL(3, "peak:%6u KB, nbMallocs:%2u, total:%6u KB \n", DISPLAYLEVEL(3, "peak:%6u KB, nbMallocs:%2u, total:%6u KB \n",
(U32)(count.peakMalloc >> 10), (unsigned)(count.peakMalloc >> 10),
count.nbMalloc, count.nbMalloc,
(U32)(count.totalMalloc >> 10)); (unsigned)(count.totalMalloc >> 10));
} }
static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsigned part, static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsigned part,
@ -226,7 +226,7 @@ static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsig
/* advanced MT API test */ /* advanced MT API test */
if (part <= 3) if (part <= 3)
{ U32 nbThreads; { unsigned nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) { for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel; int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) { for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
@ -244,7 +244,7 @@ static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsig
/* advanced MT streaming API test */ /* advanced MT streaming API test */
if (part <= 4) if (part <= 4)
{ U32 nbThreads; { unsigned nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) { for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel; int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) { for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
@ -310,7 +310,7 @@ static int basicUnitTests(U32 seed, double compressibility)
void* const compressedBuffer = malloc(compressedBufferSize); void* const compressedBuffer = malloc(compressedBufferSize);
void* const decodedBuffer = malloc(CNBuffSize); void* const decodedBuffer = malloc(CNBuffSize);
int testResult = 0; int testResult = 0;
U32 testNb=0; unsigned testNb=0;
size_t cSize; size_t cSize;
/* Create compressible noise */ /* Create compressible noise */
@ -322,33 +322,33 @@ static int basicUnitTests(U32 seed, double compressibility)
RDG_genBuffer(CNBuffer, CNBuffSize, compressibility, 0., seed); RDG_genBuffer(CNBuffer, CNBuffSize, compressibility, 0., seed);
/* Basic tests */ /* Basic tests */
DISPLAYLEVEL(3, "test%3i : ZSTD_getErrorName : ", testNb++); DISPLAYLEVEL(3, "test%3u : ZSTD_getErrorName : ", testNb++);
{ const char* errorString = ZSTD_getErrorName(0); { const char* errorString = ZSTD_getErrorName(0);
DISPLAYLEVEL(3, "OK : %s \n", errorString); DISPLAYLEVEL(3, "OK : %s \n", errorString);
} }
DISPLAYLEVEL(3, "test%3i : ZSTD_getErrorName with wrong value : ", testNb++); DISPLAYLEVEL(3, "test%3u : ZSTD_getErrorName with wrong value : ", testNb++);
{ const char* errorString = ZSTD_getErrorName(499); { const char* errorString = ZSTD_getErrorName(499);
DISPLAYLEVEL(3, "OK : %s \n", errorString); DISPLAYLEVEL(3, "OK : %s \n", errorString);
} }
DISPLAYLEVEL(3, "test%3i : min compression level : ", testNb++); DISPLAYLEVEL(3, "test%3u : min compression level : ", testNb++);
{ int const mcl = ZSTD_minCLevel(); { int const mcl = ZSTD_minCLevel();
DISPLAYLEVEL(3, "%i (OK) \n", mcl); DISPLAYLEVEL(3, "%i (OK) \n", mcl);
} }
DISPLAYLEVEL(3, "test%3i : compress %u bytes : ", testNb++, (U32)CNBuffSize); DISPLAYLEVEL(3, "test%3u : compress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ ZSTD_CCtx* const cctx = ZSTD_createCCtx(); { ZSTD_CCtx* const cctx = ZSTD_createCCtx();
if (cctx==NULL) goto _output_error; if (cctx==NULL) goto _output_error;
CHECKPLUS(r, ZSTD_compressCCtx(cctx, CHECKPLUS(r, ZSTD_compressCCtx(cctx,
compressedBuffer, compressedBufferSize, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, 1), CNBuffer, CNBuffSize, 1),
cSize=r ); cSize=r );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : size of cctx for level 1 : ", testNb++); DISPLAYLEVEL(3, "test%3i : size of cctx for level 1 : ", testNb++);
{ size_t const cctxSize = ZSTD_sizeof_CCtx(cctx); { size_t const cctxSize = ZSTD_sizeof_CCtx(cctx);
DISPLAYLEVEL(3, "%u bytes \n", (U32)cctxSize); DISPLAYLEVEL(3, "%u bytes \n", (unsigned)cctxSize);
} }
ZSTD_freeCCtx(cctx); ZSTD_freeCCtx(cctx);
} }
@ -374,7 +374,7 @@ static int basicUnitTests(U32 seed, double compressibility)
} }
DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize); DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize); { size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; } if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "OK \n");
@ -664,7 +664,7 @@ static int basicUnitTests(U32 seed, double compressibility)
{ U32 const maxNbAttempts = 1100; /* nb of usages before triggering size down is handled within zstd_compress.c. { U32 const maxNbAttempts = 1100; /* nb of usages before triggering size down is handled within zstd_compress.c.
* currently defined as 128x, but could be adjusted in the future. * currently defined as 128x, but could be adjusted in the future.
* make this test long enough so that it's not too much tied to the current definition within zstd_compress.c */ * make this test long enough so that it's not too much tied to the current definition within zstd_compress.c */
U32 u; unsigned u;
for (u=0; u<maxNbAttempts; u++) { for (u=0; u<maxNbAttempts; u++) {
CHECK_Z(ZSTD_compressCCtx(largeCCtx, compressedBuffer, compressedBufferSize, CNBuffer, 1, 1)); CHECK_Z(ZSTD_compressCCtx(largeCCtx, compressedBuffer, compressedBufferSize, CNBuffer, 1, 1));
if (ZSTD_sizeof_CCtx(largeCCtx) < largeCCtxSize) break; /* sized down */ if (ZSTD_sizeof_CCtx(largeCCtx) < largeCCtxSize) break; /* sized down */
@ -707,7 +707,7 @@ static int basicUnitTests(U32 seed, double compressibility)
CNBuffer, CNBuffSize, STATIC_CCTX_LEVEL), CNBuffer, CNBuffSize, STATIC_CCTX_LEVEL),
cSize=r ); cSize=r );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n",
(U32)cSize, (double)cSize/CNBuffSize*100); (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : simple decompression test with static DCtx : ", testNb++); DISPLAYLEVEL(3, "test%3i : simple decompression test with static DCtx : ", testNb++);
{ size_t const r = ZSTD_decompressDCtx(staticDCtx, { size_t const r = ZSTD_decompressDCtx(staticDCtx,
@ -780,23 +780,23 @@ static int basicUnitTests(U32 seed, double compressibility)
} }
DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : compress %u bytes with 2 threads : ", testNb++, (U32)CNBuffSize); DISPLAYLEVEL(3, "test%3u : compress %u bytes with 2 threads : ", testNb++, (unsigned)CNBuffSize);
CHECKPLUS(r, ZSTDMT_compressCCtx(mtctx, CHECKPLUS(r, ZSTDMT_compressCCtx(mtctx,
compressedBuffer, compressedBufferSize, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, CNBuffer, CNBuffSize,
1), 1),
cSize=r ); cSize=r );
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : decompressed size test : ", testNb++); DISPLAYLEVEL(3, "test%3i : decompressed size test : ", testNb++);
{ unsigned long long const rSize = ZSTD_getFrameContentSize(compressedBuffer, cSize); { unsigned long long const rSize = ZSTD_getFrameContentSize(compressedBuffer, cSize);
if (rSize != CNBuffSize) { if (rSize != CNBuffSize) {
DISPLAY("ZSTD_getFrameContentSize incorrect : %u != %u \n", (U32)rSize, (U32)CNBuffSize); DISPLAY("ZSTD_getFrameContentSize incorrect : %u != %u \n", (unsigned)rSize, (unsigned)CNBuffSize);
goto _output_error; goto _output_error;
} } } }
DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize); DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize); { size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; } if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "OK \n");
@ -818,9 +818,9 @@ static int basicUnitTests(U32 seed, double compressibility)
NULL, params, 3 /*overlapRLog*/), NULL, params, 3 /*overlapRLog*/),
cSize=r ); cSize=r );
} }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize); DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, (unsigned)CNBuffSize);
{ size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize); { size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; } if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(3, "OK \n"); DISPLAYLEVEL(3, "OK \n");
@ -890,7 +890,7 @@ static int basicUnitTests(U32 seed, double compressibility)
CHECKPLUS(r, ZSTD_compressEnd(ctxOrig, compressedBuffer, compressedBufferSize, CHECKPLUS(r, ZSTD_compressEnd(ctxOrig, compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize), (const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
cSize += r); cSize += r);
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : frame built with flat dictionary should be decompressible : ", testNb++); DISPLAYLEVEL(3, "test%3i : frame built with flat dictionary should be decompressible : ", testNb++);
CHECKPLUS(r, ZSTD_decompress_usingDict(dctx, CHECKPLUS(r, ZSTD_decompress_usingDict(dctx,
@ -908,7 +908,7 @@ static int basicUnitTests(U32 seed, double compressibility)
cSize += r); cSize += r);
if (cSize != cSizeOrig) goto _output_error; /* should be identical ==> same size */ if (cSize != cSizeOrig) goto _output_error; /* should be identical ==> same size */
} }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : frame built with duplicated context should be decompressible : ", testNb++); DISPLAYLEVEL(3, "test%3i : frame built with duplicated context should be decompressible : ", testNb++);
CHECKPLUS(r, ZSTD_decompress_usingDict(dctx, CHECKPLUS(r, ZSTD_decompress_usingDict(dctx,
@ -922,7 +922,7 @@ static int basicUnitTests(U32 seed, double compressibility)
{ ZSTD_DDict* const ddict = ZSTD_createDDict(CNBuffer, dictSize); { ZSTD_DDict* const ddict = ZSTD_createDDict(CNBuffer, dictSize);
size_t const r = ZSTD_decompress_usingDDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, ddict); size_t const r = ZSTD_decompress_usingDDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, ddict);
if (r != CNBuffSize - dictSize) goto _output_error; if (r != CNBuffSize - dictSize) goto _output_error;
DISPLAYLEVEL(3, "OK (size of DDict : %u) \n", (U32)ZSTD_sizeof_DDict(ddict)); DISPLAYLEVEL(3, "OK (size of DDict : %u) \n", (unsigned)ZSTD_sizeof_DDict(ddict));
ZSTD_freeDDict(ddict); ZSTD_freeDDict(ddict);
} }
@ -935,7 +935,7 @@ static int basicUnitTests(U32 seed, double compressibility)
if (r != CNBuffSize - dictSize) goto _output_error; if (r != CNBuffSize - dictSize) goto _output_error;
} }
free(ddictBuffer); free(ddictBuffer);
DISPLAYLEVEL(3, "OK (size of static DDict : %u) \n", (U32)ddictBufferSize); DISPLAYLEVEL(3, "OK (size of static DDict : %u) \n", (unsigned)ddictBufferSize);
} }
DISPLAYLEVEL(3, "test%3i : check content size on duplicated context : ", testNb++); DISPLAYLEVEL(3, "test%3i : check content size on duplicated context : ", testNb++);
@ -984,7 +984,7 @@ static int basicUnitTests(U32 seed, double compressibility)
{ size_t const sDictSize = ZDICT_trainFromBuffer(dictBuffer, dictBufferCapacity, { size_t const sDictSize = ZDICT_trainFromBuffer(dictBuffer, dictBufferCapacity,
decodedBuffer, samplesSizes, nbSamples); decodedBuffer, samplesSizes, nbSamples);
if (ZDICT_isError(sDictSize)) goto _output_error; if (ZDICT_isError(sDictSize)) goto _output_error;
DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)sDictSize); DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)sDictSize);
} }
DISPLAYLEVEL(3, "test%3i : dictBuilder : ", testNb++); DISPLAYLEVEL(3, "test%3i : dictBuilder : ", testNb++);
@ -992,7 +992,7 @@ static int basicUnitTests(U32 seed, double compressibility)
dictSize = ZDICT_trainFromBuffer(dictBuffer, dictBufferCapacity, dictSize = ZDICT_trainFromBuffer(dictBuffer, dictBufferCapacity,
CNBuffer, samplesSizes, nbSamples); CNBuffer, samplesSizes, nbSamples);
if (ZDICT_isError(dictSize)) goto _output_error; if (ZDICT_isError(dictSize)) goto _output_error;
DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize); DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : Multithreaded COVER dictBuilder : ", testNb++); DISPLAYLEVEL(3, "test%3i : Multithreaded COVER dictBuilder : ", testNb++);
{ U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; } { U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; }
@ -1006,7 +1006,7 @@ static int basicUnitTests(U32 seed, double compressibility)
&coverParams); &coverParams);
if (ZDICT_isError(dictSize)) goto _output_error; if (ZDICT_isError(dictSize)) goto _output_error;
} }
DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize); DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : Multithreaded FASTCOVER dictBuilder : ", testNb++); DISPLAYLEVEL(3, "test%3i : Multithreaded FASTCOVER dictBuilder : ", testNb++);
{ U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; } { U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; }
@ -1020,19 +1020,19 @@ static int basicUnitTests(U32 seed, double compressibility)
&fastCoverParams); &fastCoverParams);
if (ZDICT_isError(dictSize)) goto _output_error; if (ZDICT_isError(dictSize)) goto _output_error;
} }
DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize); DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++); DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++);
dictID = ZDICT_getDictID(dictBuffer, dictSize); dictID = ZDICT_getDictID(dictBuffer, dictSize);
if (dictID==0) goto _output_error; if (dictID==0) goto _output_error;
DISPLAYLEVEL(3, "OK : %u \n", dictID); DISPLAYLEVEL(3, "OK : %u \n", (unsigned)dictID);
DISPLAYLEVEL(3, "test%3i : compress with dictionary : ", testNb++); DISPLAYLEVEL(3, "test%3i : compress with dictionary : ", testNb++);
cSize = ZSTD_compress_usingDict(cctx, compressedBuffer, compressedBufferSize, cSize = ZSTD_compress_usingDict(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, CNBuffer, CNBuffSize,
dictBuffer, dictSize, 4); dictBuffer, dictSize, 4);
if (ZSTD_isError(cSize)) goto _output_error; if (ZSTD_isError(cSize)) goto _output_error;
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : retrieve dictID from dictionary : ", testNb++); DISPLAYLEVEL(3, "test%3i : retrieve dictID from dictionary : ", testNb++);
{ U32 const did = ZSTD_getDictID_fromDict(dictBuffer, dictSize); { U32 const did = ZSTD_getDictID_fromDict(dictBuffer, dictSize);
@ -1060,7 +1060,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : estimate CDict size : ", testNb++); DISPLAYLEVEL(3, "test%3i : estimate CDict size : ", testNb++);
{ ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize); { ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
size_t const estimatedSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byRef); size_t const estimatedSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byRef);
DISPLAYLEVEL(3, "OK : %u \n", (U32)estimatedSize); DISPLAYLEVEL(3, "OK : %u \n", (unsigned)estimatedSize);
} }
DISPLAYLEVEL(3, "test%3i : compress with CDict ", testNb++); DISPLAYLEVEL(3, "test%3i : compress with CDict ", testNb++);
@ -1068,13 +1068,13 @@ static int basicUnitTests(U32 seed, double compressibility)
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize,
ZSTD_dlm_byRef, ZSTD_dct_auto, ZSTD_dlm_byRef, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem); cParams, ZSTD_defaultCMem);
DISPLAYLEVEL(3, "(size : %u) : ", (U32)ZSTD_sizeof_CDict(cdict)); DISPLAYLEVEL(3, "(size : %u) : ", (unsigned)ZSTD_sizeof_CDict(cdict));
cSize = ZSTD_compress_usingCDict(cctx, compressedBuffer, compressedBufferSize, cSize = ZSTD_compress_usingCDict(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, cdict); CNBuffer, CNBuffSize, cdict);
ZSTD_freeCDict(cdict); ZSTD_freeCDict(cdict);
if (ZSTD_isError(cSize)) goto _output_error; if (ZSTD_isError(cSize)) goto _output_error;
} }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : retrieve dictID from frame : ", testNb++); DISPLAYLEVEL(3, "test%3i : retrieve dictID from frame : ", testNb++);
{ U32 const did = ZSTD_getDictID_fromFrame(compressedBuffer, cSize); { U32 const did = ZSTD_getDictID_fromFrame(compressedBuffer, cSize);
@ -1119,7 +1119,7 @@ static int basicUnitTests(U32 seed, double compressibility)
} } } }
free(cdictBuffer); free(cdictBuffer);
} } } }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : ZSTD_compress_usingCDict_advanced, no contentSize, no dictID : ", testNb++); DISPLAYLEVEL(3, "test%3i : ZSTD_compress_usingCDict_advanced, no contentSize, no dictID : ", testNb++);
{ ZSTD_frameParameters const fParams = { 0 /* frameSize */, 1 /* checksum */, 1 /* noDictID*/ }; { ZSTD_frameParameters const fParams = { 0 /* frameSize */, 1 /* checksum */, 1 /* noDictID*/ };
@ -1130,7 +1130,7 @@ static int basicUnitTests(U32 seed, double compressibility)
ZSTD_freeCDict(cdict); ZSTD_freeCDict(cdict);
if (ZSTD_isError(cSize)) goto _output_error; if (ZSTD_isError(cSize)) goto _output_error;
} }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : try retrieving contentSize from frame : ", testNb++); DISPLAYLEVEL(3, "test%3i : try retrieving contentSize from frame : ", testNb++);
{ U64 const contentSize = ZSTD_getFrameContentSize(compressedBuffer, cSize); { U64 const contentSize = ZSTD_getFrameContentSize(compressedBuffer, cSize);
@ -1157,7 +1157,7 @@ static int basicUnitTests(U32 seed, double compressibility)
dictBuffer, dictSize, p); dictBuffer, dictSize, p);
if (ZSTD_isError(cSize)) goto _output_error; if (ZSTD_isError(cSize)) goto _output_error;
} }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(3, "test%3i : frame built without dictID should be decompressible : ", testNb++); DISPLAYLEVEL(3, "test%3i : frame built without dictID should be decompressible : ", testNb++);
{ ZSTD_DCtx* const dctx = ZSTD_createDCtx(); assert(dctx != NULL); { ZSTD_DCtx* const dctx = ZSTD_createDCtx(); assert(dctx != NULL);
@ -1298,12 +1298,12 @@ static int basicUnitTests(U32 seed, double compressibility)
CNBuffer, samplesSizes, nbSamples, CNBuffer, samplesSizes, nbSamples,
params); params);
if (ZDICT_isError(dictSize)) goto _output_error; if (ZDICT_isError(dictSize)) goto _output_error;
DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)dictSize); DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)dictSize);
DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++); DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++);
dictID = ZDICT_getDictID(dictBuffer, dictSize); dictID = ZDICT_getDictID(dictBuffer, dictSize);
if (dictID==0) goto _output_error; if (dictID==0) goto _output_error;
DISPLAYLEVEL(3, "OK : %u \n", dictID); DISPLAYLEVEL(3, "OK : %u \n", (unsigned)dictID);
DISPLAYLEVEL(3, "test%3i : ZDICT_optimizeTrainFromBuffer_cover : ", testNb++); DISPLAYLEVEL(3, "test%3i : ZDICT_optimizeTrainFromBuffer_cover : ", testNb++);
memset(&params, 0, sizeof(params)); memset(&params, 0, sizeof(params));
@ -1312,12 +1312,12 @@ static int basicUnitTests(U32 seed, double compressibility)
CNBuffer, samplesSizes, CNBuffer, samplesSizes,
nbSamples / 4, &params); nbSamples / 4, &params);
if (ZDICT_isError(optDictSize)) goto _output_error; if (ZDICT_isError(optDictSize)) goto _output_error;
DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (U32)optDictSize); DISPLAYLEVEL(3, "OK, created dictionary of size %u \n", (unsigned)optDictSize);
DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++); DISPLAYLEVEL(3, "test%3i : check dictID : ", testNb++);
dictID = ZDICT_getDictID(dictBuffer, optDictSize); dictID = ZDICT_getDictID(dictBuffer, optDictSize);
if (dictID==0) goto _output_error; if (dictID==0) goto _output_error;
DISPLAYLEVEL(3, "OK : %u \n", dictID); DISPLAYLEVEL(3, "OK : %u \n", (unsigned)dictID);
ZSTD_freeCCtx(cctx); ZSTD_freeCCtx(cctx);
free(dictBuffer); free(dictBuffer);
@ -1397,7 +1397,7 @@ static int basicUnitTests(U32 seed, double compressibility)
cSize = compressedSize; cSize = compressedSize;
xxh64 = XXH64(compressedBuffer, compressedSize, 0); xxh64 = XXH64(compressedBuffer, compressedSize, 0);
} }
DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (U32)inputSize, (U32)cSize); DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (unsigned)inputSize, (unsigned)cSize);
ZSTD_freeCCtx(cctx); ZSTD_freeCCtx(cctx);
} }
@ -1412,7 +1412,7 @@ static int basicUnitTests(U32 seed, double compressibility)
CHECK(result); CHECK(result);
if (result != cSize) goto _output_error; /* must result in same compressed result, hence same size */ if (result != cSize) goto _output_error; /* must result in same compressed result, hence same size */
if (XXH64(compressedBuffer, result, 0) != xxh64) goto _output_error; /* must result in exactly same content, hence same hash */ if (XXH64(compressedBuffer, result, 0) != xxh64) goto _output_error; /* must result in exactly same content, hence same hash */
DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (U32)inputSize, (U32)result); DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (unsigned)inputSize, (unsigned)result);
} }
ZSTD_freeCCtx(cctx); ZSTD_freeCCtx(cctx);
} }
@ -1466,7 +1466,7 @@ static int basicUnitTests(U32 seed, double compressibility)
if (in.pos != in.size) goto _output_error; if (in.pos != in.size) goto _output_error;
cSize = out.pos; cSize = out.pos;
} }
DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (U32)inputSize, (U32)cSize); DISPLAYLEVEL(3, "OK (compress : %u -> %u bytes)\n", (unsigned)inputSize, (unsigned)cSize);
DISPLAYLEVEL(3, "test%3i : decompress normally (should fail) : ", testNb++); DISPLAYLEVEL(3, "test%3i : decompress normally (should fail) : ", testNb++);
{ size_t const decodeResult = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize); { size_t const decodeResult = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize);
@ -1493,7 +1493,7 @@ static int basicUnitTests(U32 seed, double compressibility)
if (result != 0) goto _output_error; if (result != 0) goto _output_error;
if (in.pos != in.size) goto _output_error; if (in.pos != in.size) goto _output_error;
if (out.pos != inputSize) goto _output_error; if (out.pos != inputSize) goto _output_error;
DISPLAYLEVEL(3, "streaming OK : regenerated %u bytes \n", (U32)out.pos); DISPLAYLEVEL(3, "streaming OK : regenerated %u bytes \n", (unsigned)out.pos);
} }
ZSTD_freeCCtx(cctx); ZSTD_freeCCtx(cctx);
@ -1593,7 +1593,7 @@ static int basicUnitTests(U32 seed, double compressibility)
memset(CNBuffer, 0, ZEROESLENGTH); memset(CNBuffer, 0, ZEROESLENGTH);
{ CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(ZEROESLENGTH), CNBuffer, ZEROESLENGTH, 1) ); { CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(ZEROESLENGTH), CNBuffer, ZEROESLENGTH, 1) );
cSize = r; } cSize = r; }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/ZEROESLENGTH*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/ZEROESLENGTH*100);
DISPLAYLEVEL(3, "test%3i : decompress %u zeroes : ", testNb++, ZEROESLENGTH); DISPLAYLEVEL(3, "test%3i : decompress %u zeroes : ", testNb++, ZEROESLENGTH);
{ CHECK_V(r, ZSTD_decompress(decodedBuffer, ZEROESLENGTH, compressedBuffer, cSize) ); { CHECK_V(r, ZSTD_decompress(decodedBuffer, ZEROESLENGTH, compressedBuffer, cSize) );
@ -1647,7 +1647,7 @@ static int basicUnitTests(U32 seed, double compressibility)
{ CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(_3BYTESTESTLENGTH), { CHECK_V(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(_3BYTESTESTLENGTH),
CNBuffer, _3BYTESTESTLENGTH, 19) ); CNBuffer, _3BYTESTESTLENGTH, 19) );
cSize = r; } cSize = r; }
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/_3BYTESTESTLENGTH*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/_3BYTESTESTLENGTH*100);
DISPLAYLEVEL(3, "test%3i : decompress lots 3-bytes sequence : ", testNb++); DISPLAYLEVEL(3, "test%3i : decompress lots 3-bytes sequence : ", testNb++);
{ CHECK_V(r, ZSTD_decompress(decodedBuffer, _3BYTESTESTLENGTH, compressedBuffer, cSize) ); { CHECK_V(r, ZSTD_decompress(decodedBuffer, _3BYTESTESTLENGTH, compressedBuffer, cSize) );
@ -1816,7 +1816,7 @@ static size_t FUZ_randomLength(U32* seed, U32 maxLog)
if (cond) { \ if (cond) { \
DISPLAY("Error => "); \ DISPLAY("Error => "); \
DISPLAY(__VA_ARGS__); \ DISPLAY(__VA_ARGS__); \
DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \ DISPLAY(" (seed %u, test nb %u) \n", (unsigned)seed, testNb); \
goto _output_error; \ goto _output_error; \
} } } }
@ -1826,12 +1826,12 @@ static size_t FUZ_randomLength(U32* seed, U32 maxLog)
if (ZSTD_isError(err)) { \ if (ZSTD_isError(err)) { \
DISPLAY("Error => %s : %s ", \ DISPLAY("Error => %s : %s ", \
#f, ZSTD_getErrorName(err)); \ #f, ZSTD_getErrorName(err)); \
DISPLAY(" (seed %u, test nb %u) \n", seed, testNb); \ DISPLAY(" (seed %u, test nb %u) \n", (unsigned)seed, testNb); \
goto _output_error; \ goto _output_error; \
} } } }
static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxDurationS, double compressibility, int bigTests) static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const maxDurationS, double compressibility, int bigTests)
{ {
static const U32 maxSrcLog = 23; static const U32 maxSrcLog = 23;
static const U32 maxSampleLog = 22; static const U32 maxSampleLog = 22;
@ -1846,7 +1846,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
ZSTD_CCtx* const ctx = ZSTD_createCCtx(); ZSTD_CCtx* const ctx = ZSTD_createCCtx();
ZSTD_DCtx* const dctx = ZSTD_createDCtx(); ZSTD_DCtx* const dctx = ZSTD_createDCtx();
U32 result = 0; U32 result = 0;
U32 testNb = 0; unsigned testNb = 0;
U32 coreSeed = seed; U32 coreSeed = seed;
UTIL_time_t const startClock = UTIL_getTime(); UTIL_time_t const startClock = UTIL_getTime();
U64 const maxClockSpan = maxDurationS * SEC_TO_MICRO; U64 const maxClockSpan = maxDurationS * SEC_TO_MICRO;
@ -1931,13 +1931,13 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
assert(cSize > 3); assert(cSize > 3);
{ const size_t missing = (FUZ_rand(&lseed) % (cSize-2)) + 1; { const size_t missing = (FUZ_rand(&lseed) % (cSize-2)) + 1;
const size_t tooSmallSize = cSize - missing; const size_t tooSmallSize = cSize - missing;
const U32 endMark = 0x4DC2B1A9; const unsigned endMark = 0x4DC2B1A9;
memcpy(dstBuffer+tooSmallSize, &endMark, 4); memcpy(dstBuffer+tooSmallSize, &endMark, sizeof(endMark));
DISPLAYLEVEL(5, "fuzzer t%u: compress into too small buffer of size %u (missing %u bytes) \n", DISPLAYLEVEL(5, "fuzzer t%u: compress into too small buffer of size %u (missing %u bytes) \n",
testNb, (unsigned)tooSmallSize, (unsigned)missing); testNb, (unsigned)tooSmallSize, (unsigned)missing);
{ size_t const errorCode = ZSTD_compressCCtx(ctx, dstBuffer, tooSmallSize, sampleBuffer, sampleSize, cLevel); { size_t const errorCode = ZSTD_compressCCtx(ctx, dstBuffer, tooSmallSize, sampleBuffer, sampleSize, cLevel);
CHECK(!ZSTD_isError(errorCode), "ZSTD_compressCCtx should have failed ! (buffer too small : %u < %u)", (U32)tooSmallSize, (U32)cSize); } CHECK(!ZSTD_isError(errorCode), "ZSTD_compressCCtx should have failed ! (buffer too small : %u < %u)", (unsigned)tooSmallSize, (unsigned)cSize); }
{ U32 endCheck; memcpy(&endCheck, dstBuffer+tooSmallSize, 4); { unsigned endCheck; memcpy(&endCheck, dstBuffer+tooSmallSize, sizeof(endCheck));
CHECK(endCheck != endMark, "ZSTD_compressCCtx : dst buffer overflow (check.%08X != %08X.mark)", endCheck, endMark); } CHECK(endCheck != endMark, "ZSTD_compressCCtx : dst buffer overflow (check.%08X != %08X.mark)", endCheck, endMark); }
} } } }
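The same hunk also tidies the fuzzer's canary idiom, replacing the literal 4 with sizeof(endMark): a known 32-bit mark is planted just past a deliberately undersized destination buffer, the call is expected to fail, and the mark is read back to prove nothing was written out of bounds. A self-contained sketch of the idiom; compress_bounded() is a hypothetical stand-in for ZSTD_compressCCtx().

    #include <stdio.h>
    #include <string.h>

    /* pretend compressor: refuses to write anything if dst is too small */
    static size_t compress_bounded(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
    {
        if (srcSize > dstCapacity) return (size_t)-1;    /* error, nothing written */
        memcpy(dst, src, srcSize);
        return srcSize;
    }

    int main(void)
    {
        char src[100] = { 0 };
        char dst[100 + sizeof(unsigned)];
        size_t const tooSmallSize = 50;
        unsigned const endMark = 0x4DC2B1A9;             /* same canary value as the fuzzer */

        /* plant the canary right after the advertised end of dst */
        memcpy(dst + tooSmallSize, &endMark, sizeof(endMark));

        (void)compress_bounded(dst, tooSmallSize, src, sizeof(src));

        /* the call may fail, but it must never touch bytes past tooSmallSize */
        {   unsigned endCheck;
            memcpy(&endCheck, dst + tooSmallSize, sizeof(endCheck));
            if (endCheck != endMark) { printf("dst buffer overflow!\n"); return 1; }
        }
        printf("no overflow detected\n");
        return 0;
    }

Using sizeof(endMark) instead of a hard-coded 4 keeps the plant and the check in sync if the canary's type ever changes, which matters slightly more now that the canary is declared as unsigned rather than U32.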
@ -1956,9 +1956,9 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
DISPLAYLEVEL(5, "fuzzer t%u: simple decompression test \n", testNb); DISPLAYLEVEL(5, "fuzzer t%u: simple decompression test \n", testNb);
{ size_t const margin = (FUZ_rand(&lseed) & 1) ? 0 : (FUZ_rand(&lseed) & 31) + 1; { size_t const margin = (FUZ_rand(&lseed) & 1) ? 0 : (FUZ_rand(&lseed) & 31) + 1;
size_t const dSize = ZSTD_decompress(dstBuffer, sampleSize + margin, cBuffer, cSize); size_t const dSize = ZSTD_decompress(dstBuffer, sampleSize + margin, cBuffer, cSize);
CHECK(dSize != sampleSize, "ZSTD_decompress failed (%s) (srcSize : %u ; cSize : %u)", ZSTD_getErrorName(dSize), (U32)sampleSize, (U32)cSize); CHECK(dSize != sampleSize, "ZSTD_decompress failed (%s) (srcSize : %u ; cSize : %u)", ZSTD_getErrorName(dSize), (unsigned)sampleSize, (unsigned)cSize);
{ U64 const crcDest = XXH64(dstBuffer, sampleSize, 0); { U64 const crcDest = XXH64(dstBuffer, sampleSize, 0);
CHECK(crcOrig != crcDest, "decompression result corrupted (pos %u / %u)", (U32)findDiff(sampleBuffer, dstBuffer, sampleSize), (U32)sampleSize); CHECK(crcOrig != crcDest, "decompression result corrupted (pos %u / %u)", (unsigned)findDiff(sampleBuffer, dstBuffer, sampleSize), (unsigned)sampleSize);
} } } }
free(sampleBuffer); /* no longer useful after this point */ free(sampleBuffer); /* no longer useful after this point */
@ -1983,7 +1983,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
static const BYTE token = 0xA9; static const BYTE token = 0xA9;
dstBuffer[tooSmallSize] = token; dstBuffer[tooSmallSize] = token;
{ size_t const errorCode = ZSTD_decompress(dstBuffer, tooSmallSize, cBuffer, cSize); { size_t const errorCode = ZSTD_decompress(dstBuffer, tooSmallSize, cBuffer, cSize);
CHECK(!ZSTD_isError(errorCode), "ZSTD_decompress should have failed : %u > %u (dst buffer too small)", (U32)errorCode, (U32)tooSmallSize); } CHECK(!ZSTD_isError(errorCode), "ZSTD_decompress should have failed : %u > %u (dst buffer too small)", (unsigned)errorCode, (unsigned)tooSmallSize); }
CHECK(dstBuffer[tooSmallSize] != token, "ZSTD_decompress : dst buffer overflow"); CHECK(dstBuffer[tooSmallSize] != token, "ZSTD_decompress : dst buffer overflow");
} }
@ -2018,7 +2018,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
{ size_t const decompressResult = ZSTD_decompress(dstBuffer, sampleSize, cBuffer, cSize); { size_t const decompressResult = ZSTD_decompress(dstBuffer, sampleSize, cBuffer, cSize);
/* result *may* be an unlikely success, but even then, it must strictly respect dst buffer boundaries */ /* result *may* be an unlikely success, but even then, it must strictly respect dst buffer boundaries */
CHECK((!ZSTD_isError(decompressResult)) && (decompressResult>sampleSize), CHECK((!ZSTD_isError(decompressResult)) && (decompressResult>sampleSize),
"ZSTD_decompress on noisy src : result is too large : %u > %u (dst buffer)", (U32)decompressResult, (U32)sampleSize); "ZSTD_decompress on noisy src : result is too large : %u > %u (dst buffer)", (unsigned)decompressResult, (unsigned)sampleSize);
} }
{ U32 endCheck; memcpy(&endCheck, dstBuffer+sampleSize, 4); { U32 endCheck; memcpy(&endCheck, dstBuffer+sampleSize, 4);
CHECK(endMark!=endCheck, "ZSTD_decompress on noisy src : dst buffer overflow"); CHECK(endMark!=endCheck, "ZSTD_decompress on noisy src : dst buffer overflow");
@ -2039,7 +2039,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
dict = srcBuffer + (FUZ_rand(&lseed) % (srcBufferSize - dictSize)); dict = srcBuffer + (FUZ_rand(&lseed) % (srcBufferSize - dictSize));
DISPLAYLEVEL(6, "fuzzer t%u: Compressing up to <=%u bytes at level %i with dictionary size %u \n", DISPLAYLEVEL(6, "fuzzer t%u: Compressing up to <=%u bytes at level %i with dictionary size %u \n",
testNb, (U32)maxTestSize, cLevel, (U32)dictSize); testNb, (unsigned)maxTestSize, cLevel, (unsigned)dictSize);
if (FUZ_rand(&lseed) & 0xF) { if (FUZ_rand(&lseed) & 0xF) {
CHECK_Z ( ZSTD_compressBegin_usingDict(refCtx, dict, dictSize, cLevel) ); CHECK_Z ( ZSTD_compressBegin_usingDict(refCtx, dict, dictSize, cLevel) );
@ -2091,7 +2091,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
CHECK_Z(roundBuffSize); CHECK_Z(roundBuffSize);
CHECK((roundBuffSize > totalTestSize) && (zfh.frameContentSize!=ZSTD_CONTENTSIZE_UNKNOWN), CHECK((roundBuffSize > totalTestSize) && (zfh.frameContentSize!=ZSTD_CONTENTSIZE_UNKNOWN),
"ZSTD_decodingBufferSize_min() requires more memory (%u) than necessary (%u)", "ZSTD_decodingBufferSize_min() requires more memory (%u) than necessary (%u)",
(U32)roundBuffSize, (U32)totalTestSize ); (unsigned)roundBuffSize, (unsigned)totalTestSize );
} } } }
if (dictSize<8) dictSize=0, dict=NULL; /* disable dictionary */ if (dictSize<8) dictSize=0, dict=NULL; /* disable dictionary */
CHECK_Z( ZSTD_decompressBegin_usingDict(dctx, dict, dictSize) ); CHECK_Z( ZSTD_decompressBegin_usingDict(dctx, dict, dictSize) );
@ -2109,7 +2109,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
CHECK (totalCSize != cSize, "compressed data should be fully read") CHECK (totalCSize != cSize, "compressed data should be fully read")
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0); { U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
CHECK(crcOrig != crcDest, "streaming decompressed data corrupted (pos %u / %u)", CHECK(crcOrig != crcDest, "streaming decompressed data corrupted (pos %u / %u)",
(U32)findDiff(mirrorBuffer, dstBuffer, totalTestSize), (U32)totalTestSize); (unsigned)findDiff(mirrorBuffer, dstBuffer, totalTestSize), (unsigned)totalTestSize);
} }
} /* for ( ; (testNb <= nbTests) */ } /* for ( ; (testNb <= nbTests) */
DISPLAY("\r%u fuzzer tests completed \n", testNb-1); DISPLAY("\r%u fuzzer tests completed \n", testNb-1);
@ -2143,10 +2143,10 @@ static int FUZ_usage(const char* programName)
DISPLAY( " %s [args]\n", programName); DISPLAY( " %s [args]\n", programName);
DISPLAY( "\n"); DISPLAY( "\n");
DISPLAY( "Arguments :\n"); DISPLAY( "Arguments :\n");
DISPLAY( " -i# : Nb of tests (default:%u) \n", nbTestsDefault); DISPLAY( " -i# : Nb of tests (default:%i) \n", nbTestsDefault);
DISPLAY( " -s# : Select seed (default:prompt user)\n"); DISPLAY( " -s# : Select seed (default:prompt user)\n");
DISPLAY( " -t# : Select starting test number (default:0)\n"); DISPLAY( " -t# : Select starting test number (default:0)\n");
DISPLAY( " -P# : Select compressibility in %% (default:%u%%)\n", FUZ_compressibility_default); DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n", FUZ_compressibility_default);
DISPLAY( " -v : verbose\n"); DISPLAY( " -v : verbose\n");
DISPLAY( " -p : pause at the end\n"); DISPLAY( " -p : pause at the end\n");
DISPLAY( " -h : display help and exit\n"); DISPLAY( " -h : display help and exit\n");
@ -2193,7 +2193,7 @@ int main(int argc, const char** argv)
int argNb; int argNb;
int nbTests = nbTestsDefault; int nbTests = nbTestsDefault;
int testNb = 0; int testNb = 0;
U32 proba = FUZ_compressibility_default; int proba = FUZ_compressibility_default;
int result = 0; int result = 0;
U32 mainPause = 0; U32 mainPause = 0;
U32 maxDuration = 0; U32 maxDuration = 0;
@ -2280,8 +2280,8 @@ int main(int argc, const char** argv)
seed = h % 10000; seed = h % 10000;
} }
DISPLAY("Seed = %u\n", seed); DISPLAY("Seed = %u\n", (unsigned)seed);
if (proba!=FUZ_compressibility_default) DISPLAY("Compressibility : %u%%\n", proba); if (proba!=FUZ_compressibility_default) DISPLAY("Compressibility : %i%%\n", proba);
if (memTestsOnly) { if (memTestsOnly) {
g_displayLevel = MAX(3, g_displayLevel); g_displayLevel = MAX(3, g_displayLevel);
View File
@ -201,7 +201,7 @@ static int invRangeMap(varInds_t param, U32 value)
} }
/* display of params */ /* display of params */
static void displayParamVal(FILE* f, varInds_t param, U32 value, int width) static void displayParamVal(FILE* f, varInds_t param, unsigned value, int width)
{ {
switch(param) { switch(param) {
case wlog_ind: case wlog_ind:
@ -636,7 +636,7 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize)
if(adjust != pc->vals[wlog_ind]) { if(adjust != pc->vals[wlog_ind]) {
pc->vals[wlog_ind] = adjust; pc->vals[wlog_ind] = adjust;
DISPLAY("Warning: windowLog larger than src/block size, adjusted to %u\n", DISPLAY("Warning: windowLog larger than src/block size, adjusted to %u\n",
pc->vals[wlog_ind]); (unsigned)pc->vals[wlog_ind]);
} }
} }
} }
@ -652,7 +652,7 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize)
if(pc->vals[clog_ind] > maxclog) { if(pc->vals[clog_ind] > maxclog) {
pc->vals[clog_ind] = maxclog; pc->vals[clog_ind] = maxclog;
DISPLAY("Warning: chainlog too much larger than windowLog size, adjusted to %u\n", DISPLAY("Warning: chainlog too much larger than windowLog size, adjusted to %u\n",
pc->vals[clog_ind]); (unsigned)pc->vals[clog_ind]);
} }
} }
@ -660,7 +660,7 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize)
if(pc->vals[wlog_ind] + 1 < pc->vals[hlog_ind]) { if(pc->vals[wlog_ind] + 1 < pc->vals[hlog_ind]) {
pc->vals[hlog_ind] = pc->vals[wlog_ind] + 1; pc->vals[hlog_ind] = pc->vals[wlog_ind] + 1;
DISPLAY("Warning: hashlog too much larger than windowLog size, adjusted to %u\n", DISPLAY("Warning: hashlog too much larger than windowLog size, adjusted to %u\n",
pc->vals[hlog_ind]); (unsigned)pc->vals[hlog_ind]);
} }
} }
@ -668,7 +668,7 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize)
if(pc->vals[slog_ind] > pc->vals[clog_ind]) { if(pc->vals[slog_ind] > pc->vals[clog_ind]) {
pc->vals[clog_ind] = pc->vals[slog_ind]; pc->vals[clog_ind] = pc->vals[slog_ind];
DISPLAY("Warning: searchLog larger than chainLog, adjusted to %u\n", DISPLAY("Warning: searchLog larger than chainLog, adjusted to %u\n",
pc->vals[slog_ind]); (unsigned)pc->vals[slog_ind]);
} }
} }
} }
@ -705,7 +705,7 @@ BMK_paramValues_into_commandLine(FILE* f, const paramValues_t params)
if (!first) { fprintf(f, ","); } if (!first) { fprintf(f, ","); }
fprintf(f,"%s=", g_paramNames[v]); fprintf(f,"%s=", g_paramNames[v]);
if (v == strt_ind) { fprintf(f,"%u", params.vals[v]); } if (v == strt_ind) { fprintf(f,"%u", (unsigned)params.vals[v]); }
else { displayParamVal(f, v, params.vals[v], 0); } else { displayParamVal(f, v, params.vals[v], 0); }
first = 0; first = 0;
} }
@ -1532,7 +1532,7 @@ static void display_params_tested(paramValues_t cParams)
varInds_t vi; varInds_t vi;
DISPLAYLEVEL(3, "\r testing :"); DISPLAYLEVEL(3, "\r testing :");
for (vi=0; vi < NUM_PARAMS; vi++) { for (vi=0; vi < NUM_PARAMS; vi++) {
DISPLAYLEVEL(3, "%3u,", cParams.vals[vi]); DISPLAYLEVEL(3, "%3u,", (unsigned)cParams.vals[vi]);
} }
DISPLAYLEVEL(3, "\b \r"); DISPLAYLEVEL(3, "\b \r");
} }
@ -2456,9 +2456,9 @@ optimizeForSize(const char* const * const fileNamesTable, const size_t nbFiles,
DISPLAYLEVEL(2, "optimizing for %lu Files", (unsigned long)nbFiles); DISPLAYLEVEL(2, "optimizing for %lu Files", (unsigned long)nbFiles);
} }
if(target.cSpeed != 0) { DISPLAYLEVEL(2," - limit compression speed %u MB/s", target.cSpeed >> 20); } if(target.cSpeed != 0) { DISPLAYLEVEL(2," - limit compression speed %u MB/s", (unsigned)(target.cSpeed >> 20)); }
if(target.dSpeed != 0) { DISPLAYLEVEL(2, " - limit decompression speed %u MB/s", target.dSpeed >> 20); } if(target.dSpeed != 0) { DISPLAYLEVEL(2, " - limit decompression speed %u MB/s", (unsigned)(target.dSpeed >> 20)); }
if(target.cMem != (U32)-1) { DISPLAYLEVEL(2, " - limit memory %u MB", target.cMem >> 20); } if(target.cMem != (U32)-1) { DISPLAYLEVEL(2, " - limit memory %u MB", (unsigned)(target.cMem >> 20)); }
DISPLAYLEVEL(2, "\n"); DISPLAYLEVEL(2, "\n");
init_clockGranularity(); init_clockGranularity();
@ -2640,7 +2640,8 @@ static int usage_advanced(void)
DISPLAY( " -S : Single run \n"); DISPLAY( " -S : Single run \n");
DISPLAY( " --zstd : Single run, parameter selection same as zstdcli \n"); DISPLAY( " --zstd : Single run, parameter selection same as zstdcli \n");
DISPLAY( " -P# : generated sample compressibility (default : %.1f%%) \n", COMPRESSIBILITY_DEFAULT * 100); DISPLAY( " -P# : generated sample compressibility (default : %.1f%%) \n", COMPRESSIBILITY_DEFAULT * 100);
DISPLAY( " -t# : Caps runtime of operation in seconds (default : %u seconds (%.1f hours)) \n", g_timeLimit_s, (double)g_timeLimit_s / 3600); DISPLAY( " -t# : Caps runtime of operation in seconds (default : %u seconds (%.1f hours)) \n",
(unsigned)g_timeLimit_s, (double)g_timeLimit_s / 3600);
DISPLAY( " -v : Prints Benchmarking output\n"); DISPLAY( " -v : Prints Benchmarking output\n");
DISPLAY( " -D : Next argument dictionary file\n"); DISPLAY( " -D : Next argument dictionary file\n");
DISPLAY( " -s : Seperate Files\n"); DISPLAY( " -s : Seperate Files\n");
@ -2875,7 +2876,7 @@ int main(int argc, const char** argv)
case 'B': case 'B':
argument++; argument++;
g_blockSize = readU32FromChar(&argument); g_blockSize = readU32FromChar(&argument);
DISPLAY("using %u KB block size \n", g_blockSize>>10); DISPLAY("using %u KB block size \n", (unsigned)(g_blockSize>>10));
break; break;
/* caps runtime (in seconds) */ /* caps runtime (in seconds) */
View File
@ -17,7 +17,7 @@
static const size_t kMatchBytes = 128; static const size_t kMatchBytes = 128;
#define SEQ_rotl32(x,r) ((x << r) | (x >> (32 - r))) #define SEQ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
static BYTE SEQ_randByte(U32* src) static BYTE SEQ_randByte(unsigned* src)
{ {
static const U32 prime1 = 2654435761U; static const U32 prime1 = 2654435761U;
static const U32 prime2 = 2246822519U; static const U32 prime2 = 2246822519U;
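SEQ_randByte() is one of the places where a parameter's pointer type had to be aligned with its caller: U32* and unsigned* are incompatible pointer types whenever U32 is not literally 'unsigned', even though both point at 32-bit values, so mixing them draws a compiler diagnostic on the MIPS32 configuration described in the commit message. A standalone sketch of that bug class; the generator body is illustrative, not seqgen.c's actual code.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t U32;            /* 'unsigned long' on 32-bit MIPS, 'unsigned' on most targets */
    typedef unsigned char BYTE;      /* stand-in for mem.h's BYTE */

    /* illustrative byte generator taking the seed as unsigned*, like the fixed code */
    static BYTE randByte(unsigned* src)
    {
        unsigned rand32 = *src;
        rand32 = rand32 * 2654435761U + 2246822519U;
        *src = rand32;
        return (BYTE)(rand32 >> 16);
    }

    int main(void)
    {
        unsigned seed = 42;          /* if declared as 'U32 seed', &seed would no longer match randByte's parameter */
        printf("byte: %u\n", (unsigned)randByte(&seed));
        return 0;
    }

On the common configurations where unsigned and U32 coincide the mismatch is invisible to the compiler, so it only surfaces on builds where the two types actually differ.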
View File
@ -46,7 +46,7 @@
#define MB *(1U<<20) #define MB *(1U<<20)
#define GB *(1U<<30) #define GB *(1U<<30)
static const U32 nbTestsDefault = 10000; static const int nbTestsDefault = 10000;
static const U32 g_cLevelMax_smallTests = 10; static const U32 g_cLevelMax_smallTests = 10;
#define COMPRESSIBLE_NOISE_LENGTH (10 MB) #define COMPRESSIBLE_NOISE_LENGTH (10 MB)
#define FUZ_COMPRESSIBILITY_DEFAULT 50 #define FUZ_COMPRESSIBILITY_DEFAULT 50
@ -84,7 +84,7 @@ static U64 g_clockTime = 0;
@return : a 27 bits random value, from a 32-bits `seed`. @return : a 27 bits random value, from a 32-bits `seed`.
`seed` is also modified */ `seed` is also modified */
#define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r))) #define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
static unsigned int FUZ_rand(unsigned int* seedPtr) static U32 FUZ_rand(U32* seedPtr)
{ {
static const U32 prime2 = 2246822519U; static const U32 prime2 = 2246822519U;
U32 rand32 = *seedPtr; U32 rand32 = *seedPtr;
@ -100,7 +100,7 @@ static unsigned int FUZ_rand(unsigned int* seedPtr)
DISPLAY("Error => "); \ DISPLAY("Error => "); \
DISPLAY(__VA_ARGS__); \ DISPLAY(__VA_ARGS__); \
DISPLAY(" (seed %u, test nb %u, line %u) \n", \ DISPLAY(" (seed %u, test nb %u, line %u) \n", \
seed, testNb, __LINE__); \ (unsigned)seed, testNb, __LINE__); \
goto _output_error; \ goto _output_error; \
} } } }
@ -268,7 +268,7 @@ static int basicUnitTests(U32 seed, double compressibility)
void* decodedBuffer = malloc(decodedBufferSize); void* decodedBuffer = malloc(decodedBufferSize);
size_t cSize; size_t cSize;
int testResult = 0; int testResult = 0;
U32 testNb = 1; int testNb = 1;
U32 coreSeed = 0; /* this name to conform with CHECK_Z macro display */ U32 coreSeed = 0; /* this name to conform with CHECK_Z macro display */
ZSTD_CStream* zc = ZSTD_createCStream(); ZSTD_CStream* zc = ZSTD_createCStream();
ZSTD_DStream* zd = ZSTD_createDStream(); ZSTD_DStream* zd = ZSTD_createDStream();
@ -309,7 +309,7 @@ static int basicUnitTests(U32 seed, double compressibility)
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */ if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff); { size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */ if (r != 0) goto _output_error; } /* error, or some data not flushed */
DISPLAYLEVEL(3, "OK (%u bytes)\n", (U32)outBuff.pos); DISPLAYLEVEL(3, "OK (%u bytes)\n", (unsigned)outBuff.pos);
/* generate skippable frame */ /* generate skippable frame */
MEM_writeLE32(compressedBuffer, ZSTD_MAGIC_SKIPPABLE_START); MEM_writeLE32(compressedBuffer, ZSTD_MAGIC_SKIPPABLE_START);
@ -331,7 +331,8 @@ static int basicUnitTests(U32 seed, double compressibility)
{ size_t const r = ZSTD_endStream(zc, &outBuff); { size_t const r = ZSTD_endStream(zc, &outBuff);
if (r != 0) goto _output_error; } /* error, or some data not flushed */ if (r != 0) goto _output_error; } /* error, or some data not flushed */
cSize += outBuff.pos; cSize += outBuff.pos;
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n",
(unsigned)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
/* context size functions */ /* context size functions */
DISPLAYLEVEL(3, "test%3i : estimate CStream size : ", testNb++); DISPLAYLEVEL(3, "test%3i : estimate CStream size : ", testNb++);
@ -340,13 +341,13 @@ static int basicUnitTests(U32 seed, double compressibility)
size_t const cdictSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); /* uses ZSTD_initCStream_usingDict() */ size_t const cdictSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); /* uses ZSTD_initCStream_usingDict() */
if (ZSTD_isError(cstreamSize)) goto _output_error; if (ZSTD_isError(cstreamSize)) goto _output_error;
if (ZSTD_isError(cdictSize)) goto _output_error; if (ZSTD_isError(cdictSize)) goto _output_error;
DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)(cstreamSize + cdictSize)); DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)(cstreamSize + cdictSize));
} }
DISPLAYLEVEL(3, "test%3i : check actual CStream size : ", testNb++); DISPLAYLEVEL(3, "test%3i : check actual CStream size : ", testNb++);
{ size_t const s = ZSTD_sizeof_CStream(zc); { size_t const s = ZSTD_sizeof_CStream(zc);
if (ZSTD_isError(s)) goto _output_error; if (ZSTD_isError(s)) goto _output_error;
DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s); DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
} }
/* Attempt bad compression parameters */ /* Attempt bad compression parameters */
@ -369,7 +370,7 @@ static int basicUnitTests(U32 seed, double compressibility)
outBuff.size = CNBufferSize; outBuff.size = CNBufferSize;
outBuff.pos = 0; outBuff.pos = 0;
{ size_t const r = ZSTD_decompressStream(zd, &outBuff, &inBuff); { size_t const r = ZSTD_decompressStream(zd, &outBuff, &inBuff);
DISPLAYLEVEL(5, " ( ZSTD_decompressStream => %u ) ", (U32)r); DISPLAYLEVEL(5, " ( ZSTD_decompressStream => %u ) ", (unsigned)r);
if (r != 0) goto _output_error; if (r != 0) goto _output_error;
} }
if (outBuff.pos != 0) goto _output_error; /* skippable frame output len is 0 */ if (outBuff.pos != 0) goto _output_error; /* skippable frame output len is 0 */
@ -409,18 +410,18 @@ static int basicUnitTests(U32 seed, double compressibility)
const void* cStart = (char*)compressedBuffer + (skippableFrameSize + 8); const void* cStart = (char*)compressedBuffer + (skippableFrameSize + 8);
size_t const gfhError = ZSTD_getFrameHeader(&fhi, cStart, cSize); size_t const gfhError = ZSTD_getFrameHeader(&fhi, cStart, cSize);
if (gfhError!=0) goto _output_error; if (gfhError!=0) goto _output_error;
DISPLAYLEVEL(5, " (windowSize : %u) ", (U32)fhi.windowSize); DISPLAYLEVEL(5, " (windowSize : %u) ", (unsigned)fhi.windowSize);
{ size_t const s = ZSTD_estimateDStreamSize(fhi.windowSize) { size_t const s = ZSTD_estimateDStreamSize(fhi.windowSize)
/* uses ZSTD_initDStream_usingDict() */ /* uses ZSTD_initDStream_usingDict() */
+ ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy); + ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
if (ZSTD_isError(s)) goto _output_error; if (ZSTD_isError(s)) goto _output_error;
DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s); DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
} } } }
DISPLAYLEVEL(3, "test%3i : check actual DStream size : ", testNb++); DISPLAYLEVEL(3, "test%3i : check actual DStream size : ", testNb++);
{ size_t const s = ZSTD_sizeof_DStream(zd); { size_t const s = ZSTD_sizeof_DStream(zd);
if (ZSTD_isError(s)) goto _output_error; if (ZSTD_isError(s)) goto _output_error;
DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s); DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
} }
/* Decompression by small increment */ /* Decompression by small increment */
@ -507,7 +508,7 @@ static int basicUnitTests(U32 seed, double compressibility)
if (r != 0) goto _output_error; } /* error, or some data not flushed */ if (r != 0) goto _output_error; } /* error, or some data not flushed */
{ unsigned long long origSize = ZSTD_findDecompressedSize(outBuff.dst, outBuff.pos); { unsigned long long origSize = ZSTD_findDecompressedSize(outBuff.dst, outBuff.pos);
if ((size_t)origSize != CNBufferSize) goto _output_error; } /* exact original size must be present */ if ((size_t)origSize != CNBufferSize) goto _output_error; } /* exact original size must be present */
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
/* wrong _srcSize compression test */ /* wrong _srcSize compression test */
DISPLAYLEVEL(3, "test%3i : too large srcSize : %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH-1); DISPLAYLEVEL(3, "test%3i : too large srcSize : %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH-1);
@ -598,7 +599,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(3, "test%3i : digested dictionary : ", testNb++); DISPLAYLEVEL(3, "test%3i : digested dictionary : ", testNb++);
{ ZSTD_CDict* const cdict = ZSTD_createCDict(dictionary.start, dictionary.filled, 1 /*byRef*/ ); { ZSTD_CDict* const cdict = ZSTD_createCDict(dictionary.start, dictionary.filled, 1 /*byRef*/ );
size_t const initError = ZSTD_initCStream_usingCDict(zc, cdict); size_t const initError = ZSTD_initCStream_usingCDict(zc, cdict);
DISPLAYLEVEL(5, "ZSTD_initCStream_usingCDict result : %u ", (U32)initError); DISPLAYLEVEL(5, "ZSTD_initCStream_usingCDict result : %u ", (unsigned)initError);
if (ZSTD_isError(initError)) goto _output_error; if (ZSTD_isError(initError)) goto _output_error;
outBuff.dst = compressedBuffer; outBuff.dst = compressedBuffer;
outBuff.size = compressedBufferSize; outBuff.size = compressedBufferSize;
@ -610,18 +611,18 @@ static int basicUnitTests(U32 seed, double compressibility)
CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) ); CHECK_Z( ZSTD_compressStream(zc, &outBuff, &inBuff) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */ if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
{ size_t const r = ZSTD_endStream(zc, &outBuff); { size_t const r = ZSTD_endStream(zc, &outBuff);
DISPLAYLEVEL(5, "- ZSTD_endStream result : %u ", (U32)r); DISPLAYLEVEL(5, "- ZSTD_endStream result : %u ", (unsigned)r);
if (r != 0) goto _output_error; /* error, or some data not flushed */ if (r != 0) goto _output_error; /* error, or some data not flushed */
} }
cSize = outBuff.pos; cSize = outBuff.pos;
ZSTD_freeCDict(cdict); ZSTD_freeCDict(cdict);
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
} }
DISPLAYLEVEL(3, "test%3i : check CStream size : ", testNb++); DISPLAYLEVEL(3, "test%3i : check CStream size : ", testNb++);
{ size_t const s = ZSTD_sizeof_CStream(zc); { size_t const s = ZSTD_sizeof_CStream(zc);
if (ZSTD_isError(s)) goto _output_error; if (ZSTD_isError(s)) goto _output_error;
DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s); DISPLAYLEVEL(3, "OK (%u bytes) \n", (unsigned)s);
} }
DISPLAYLEVEL(4, "test%3i : check Dictionary ID : ", testNb++); DISPLAYLEVEL(4, "test%3i : check Dictionary ID : ", testNb++);
@ -631,7 +632,7 @@ static int basicUnitTests(U32 seed, double compressibility)
} }
/* DDict scenario */ /* DDict scenario */
DISPLAYLEVEL(3, "test%3i : decompress %u bytes with digested dictionary : ", testNb++, (U32)CNBufferSize); DISPLAYLEVEL(3, "test%3i : decompress %u bytes with digested dictionary : ", testNb++, (unsigned)CNBufferSize);
{ ZSTD_DDict* const ddict = ZSTD_createDDict(dictionary.start, dictionary.filled); { ZSTD_DDict* const ddict = ZSTD_createDDict(dictionary.start, dictionary.filled);
size_t const initError = ZSTD_initDStream_usingDDict(zd, ddict); size_t const initError = ZSTD_initDStream_usingDDict(zd, ddict);
if (ZSTD_isError(initError)) goto _output_error; if (ZSTD_isError(initError)) goto _output_error;
@ -723,7 +724,7 @@ static int basicUnitTests(U32 seed, double compressibility)
if (r != 0) goto _output_error; } /* error, or some data not flushed */ if (r != 0) goto _output_error; } /* error, or some data not flushed */
cSize = outBuff.pos; cSize = outBuff.pos;
ZSTD_freeCDict(cdict); ZSTD_freeCDict(cdict);
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
} }
DISPLAYLEVEL(3, "test%3i : try retrieving dictID from frame : ", testNb++); DISPLAYLEVEL(3, "test%3i : try retrieving dictID from frame : ", testNb++);
@ -749,7 +750,7 @@ static int basicUnitTests(U32 seed, double compressibility)
CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end) ); CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */ if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
cSize = outBuff.pos; cSize = outBuff.pos;
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
DISPLAYLEVEL(3, "test%3i : decompress with ZSTD_DCtx_refPrefix : ", testNb++); DISPLAYLEVEL(3, "test%3i : decompress with ZSTD_DCtx_refPrefix : ", testNb++);
CHECK_Z( ZSTD_DCtx_refPrefix(zd, dictionary.start, dictionary.filled) ); CHECK_Z( ZSTD_DCtx_refPrefix(zd, dictionary.start, dictionary.filled) );
@ -780,7 +781,7 @@ static int basicUnitTests(U32 seed, double compressibility)
CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end) ); CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end) );
if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */ if (inBuff.pos != inBuff.size) goto _output_error; /* entire input should be consumed */
cSize = outBuff.pos; cSize = outBuff.pos;
DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100); DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (unsigned)cSize, (double)cSize/CNBufferSize*100);
DISPLAYLEVEL(3, "test%3i : decompress without dictionary (should work): ", testNb++); DISPLAYLEVEL(3, "test%3i : decompress without dictionary (should work): ", testNb++);
CHECK_Z( ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize) ); CHECK_Z( ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize) );
@ -867,7 +868,7 @@ static int basicUnitTests(U32 seed, double compressibility)
size_t const start = jobSize + (offset-1); size_t const start = jobSize + (offset-1);
const BYTE* const srcToCopy = (const BYTE*)CNBuffer + start; const BYTE* const srcToCopy = (const BYTE*)CNBuffer + start;
BYTE* const dst = (BYTE*)CNBuffer + start - offset; BYTE* const dst = (BYTE*)CNBuffer + start - offset;
DISPLAYLEVEL(3, "test%3i : compress %u bytes with multiple threads + dictionary : ", testNb++, (U32)srcSize); DISPLAYLEVEL(3, "test%3i : compress %u bytes with multiple threads + dictionary : ", testNb++, (unsigned)srcSize);
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_compressionLevel, 3) ); CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_compressionLevel, 3) );
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_nbWorkers, nbWorkers) ); CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_nbWorkers, nbWorkers) );
CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_jobSize, jobSize) ); CHECK_Z( ZSTD_CCtx_setParameter(zc, ZSTD_c_jobSize, jobSize) );
@ -897,7 +898,7 @@ static int basicUnitTests(U32 seed, double compressibility)
{ ZSTD_DStream* const dstream = ZSTD_createDCtx(); { ZSTD_DStream* const dstream = ZSTD_createDCtx();
ZSTD_frameHeader zfh; ZSTD_frameHeader zfh;
ZSTD_getFrameHeader(&zfh, compressedBuffer, cSize); ZSTD_getFrameHeader(&zfh, compressedBuffer, cSize);
DISPLAYLEVEL(5, "frame windowsize = %u : ", (U32)zfh.windowSize); DISPLAYLEVEL(5, "frame windowsize = %u : ", (unsigned)zfh.windowSize);
outBuff.dst = decodedBuffer; outBuff.dst = decodedBuffer;
outBuff.size = CNBufferSize; outBuff.size = CNBufferSize;
outBuff.pos = 0; outBuff.pos = 0;
@ -1132,10 +1133,10 @@ static size_t findDiff(const void* buf1, const void* buf2, size_t max)
if (b1[u] != b2[u]) break; if (b1[u] != b2[u]) break;
} }
if (u==max) { if (u==max) {
DISPLAY("=> No difference detected within %u bytes \n", (U32)max); DISPLAY("=> No difference detected within %u bytes \n", (unsigned)max);
return u; return u;
} }
DISPLAY("Error at position %u / %u \n", (U32)u, (U32)max); DISPLAY("Error at position %u / %u \n", (unsigned)u, (unsigned)max);
if (u>=3) if (u>=3)
DISPLAY(" %02X %02X %02X ", DISPLAY(" %02X %02X %02X ",
b1[u-3], b1[u-2], b1[u-1]); b1[u-3], b1[u-2], b1[u-1]);
@ -1168,7 +1169,7 @@ static U32 FUZ_randomClampedLength(U32* seed, U32 minVal, U32 maxVal)
return (U32)((FUZ_rand(seed) % mod) + minVal); return (U32)((FUZ_rand(seed) % mod) + minVal);
} }
static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compressibility, int bigTests) static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double compressibility, int bigTests)
{ {
U32 const maxSrcLog = bigTests ? 24 : 22; U32 const maxSrcLog = bigTests ? 24 : 22;
static const U32 maxSampleLog = 19; static const U32 maxSampleLog = 19;
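The prototype changes above (U32 nbTests becoming unsigned, or int) are the substance of this commit: U32 and unsigned are usually the same type, but not always, and pointer arguments make the difference visible. The fragment below is a deliberately hypothetical miniature of the breakage and the fix, assuming a target where U32 would be unsigned long.

    /* public prototype (hypothetical) : plain unsigned, as in the *.h files */
    static void getStats(unsigned* nbSymbols) { *nbSymbols = 256; }

    /* on a 32-bit MIPS build, U32 (uint32_t) is unsigned long */
    typedef unsigned long U32_like;

    U32_like caller(void)
    {
        U32_like wide;
        /* getStats(&wide);   <-- constraint violation : unsigned long* is not
         *                        compatible with unsigned*, even though both
         *                        types are 32 bits wide on this target        */
        unsigned tmp;
        getStats(&tmp);       /* pass the type the prototype declares...      */
        wide = tmp;           /* ...and convert the value, which is always OK */
        return wide;
    }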
@ -1181,7 +1182,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
size_t const dstBufferSize = srcBufferSize; size_t const dstBufferSize = srcBufferSize;
BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize); BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize);
U32 result = 0; U32 result = 0;
U32 testNb = 0; unsigned testNb = 0;
U32 coreSeed = seed; U32 coreSeed = seed;
ZSTD_CStream* zc = ZSTD_createCStream(); /* will be re-created sometimes */ ZSTD_CStream* zc = ZSTD_createCStream(); /* will be re-created sometimes */
ZSTD_DStream* zd = ZSTD_createDStream(); /* will be re-created sometimes */ ZSTD_DStream* zd = ZSTD_createDStream(); /* will be re-created sometimes */
@ -1362,7 +1363,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
} }
CHECK (decompressionResult != 0, "frame not fully decoded"); CHECK (decompressionResult != 0, "frame not fully decoded");
CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)",
(U32)outBuff.pos, (U32)totalTestSize); (unsigned)outBuff.pos, (unsigned)totalTestSize);
CHECK (inBuff.pos != cSize, "compressed data should be fully read") CHECK (inBuff.pos != cSize, "compressed data should be fully read")
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0); { U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize); if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize);
@ -1420,7 +1421,7 @@ _output_error:
/* fuzzing ZSTDMT_* interface */ /* fuzzing ZSTDMT_* interface */
static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, static int fuzzerTests_MT(U32 seed, int nbTests, int startTest,
double compressibility, int bigTests) double compressibility, int bigTests)
{ {
const U32 maxSrcLog = bigTests ? 24 : 22; const U32 maxSrcLog = bigTests ? 24 : 22;
@ -1434,9 +1435,9 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
size_t const dstBufferSize = srcBufferSize; size_t const dstBufferSize = srcBufferSize;
BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize); BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize);
U32 result = 0; U32 result = 0;
U32 testNb = 0; int testNb = 0;
U32 coreSeed = seed; U32 coreSeed = seed;
U32 nbThreads = 2; int nbThreads = 2;
ZSTDMT_CCtx* zc = ZSTDMT_createCCtx(nbThreads); /* will be reset sometimes */ ZSTDMT_CCtx* zc = ZSTDMT_createCCtx(nbThreads); /* will be reset sometimes */
ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */ ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */
ZSTD_DStream* const zd_noise = ZSTD_createDStream(); ZSTD_DStream* const zd_noise = ZSTD_createDStream();
@ -1464,7 +1465,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
RDG_genBuffer(cNoiseBuffer[4], srcBufferSize, 1.00, 0., coreSeed); /* sparse content */ RDG_genBuffer(cNoiseBuffer[4], srcBufferSize, 1.00, 0., coreSeed); /* sparse content */
memset(copyBuffer, 0x65, copyBufferSize); /* make copyBuffer considered initialized */ memset(copyBuffer, 0x65, copyBufferSize); /* make copyBuffer considered initialized */
ZSTD_initDStream_usingDict(zd, NULL, 0); /* ensure at least one init */ ZSTD_initDStream_usingDict(zd, NULL, 0); /* ensure at least one init */
DISPLAYLEVEL(6, "Creating initial context with %u threads \n", nbThreads); DISPLAYLEVEL(6, "Creating initial context with %i threads \n", nbThreads);
/* catch up testNb */ /* catch up testNb */
for (testNb=1; testNb < startTest; testNb++) for (testNb=1; testNb < startTest; testNb++)
@ -1542,7 +1543,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
{ U64 const pledgedSrcSize = (FUZ_rand(&lseed) & 3) ? ZSTD_CONTENTSIZE_UNKNOWN : maxTestSize; { U64 const pledgedSrcSize = (FUZ_rand(&lseed) & 3) ? ZSTD_CONTENTSIZE_UNKNOWN : maxTestSize;
ZSTD_parameters params = ZSTD_getParams(cLevel, pledgedSrcSize, dictSize); ZSTD_parameters params = ZSTD_getParams(cLevel, pledgedSrcSize, dictSize);
DISPLAYLEVEL(5, "Init with windowLog = %u, pledgedSrcSize = %u, dictSize = %u \n", DISPLAYLEVEL(5, "Init with windowLog = %u, pledgedSrcSize = %u, dictSize = %u \n",
params.cParams.windowLog, (U32)pledgedSrcSize, (U32)dictSize); params.cParams.windowLog, (unsigned)pledgedSrcSize, (unsigned)dictSize);
params.fParams.checksumFlag = FUZ_rand(&lseed) & 1; params.fParams.checksumFlag = FUZ_rand(&lseed) & 1;
params.fParams.noDictIDFlag = FUZ_rand(&lseed) & 1; params.fParams.noDictIDFlag = FUZ_rand(&lseed) & 1;
params.fParams.contentSizeFlag = FUZ_rand(&lseed) & 1; params.fParams.contentSizeFlag = FUZ_rand(&lseed) & 1;
@ -1566,9 +1567,9 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
ZSTD_inBuffer inBuff = { srcBuffer+srcStart, srcSize, 0 }; ZSTD_inBuffer inBuff = { srcBuffer+srcStart, srcSize, 0 };
outBuff.size = outBuff.pos + dstBuffSize; outBuff.size = outBuff.pos + dstBuffSize;
DISPLAYLEVEL(6, "Sending %u bytes to compress \n", (U32)srcSize); DISPLAYLEVEL(6, "Sending %u bytes to compress \n", (unsigned)srcSize);
CHECK_Z( ZSTDMT_compressStream(zc, &outBuff, &inBuff) ); CHECK_Z( ZSTDMT_compressStream(zc, &outBuff, &inBuff) );
DISPLAYLEVEL(6, "%u bytes read by ZSTDMT_compressStream \n", (U32)inBuff.pos); DISPLAYLEVEL(6, "%u bytes read by ZSTDMT_compressStream \n", (unsigned)inBuff.pos);
XXH64_update(&xxhState, srcBuffer+srcStart, inBuff.pos); XXH64_update(&xxhState, srcBuffer+srcStart, inBuff.pos);
memcpy(copyBuffer+totalTestSize, srcBuffer+srcStart, inBuff.pos); memcpy(copyBuffer+totalTestSize, srcBuffer+srcStart, inBuff.pos);
@ -1581,10 +1582,10 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize); size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
size_t const previousPos = outBuff.pos; size_t const previousPos = outBuff.pos;
outBuff.size = outBuff.pos + adjustedDstSize; outBuff.size = outBuff.pos + adjustedDstSize;
DISPLAYLEVEL(5, "Flushing into dst buffer of size %u \n", (U32)adjustedDstSize); DISPLAYLEVEL(5, "Flushing into dst buffer of size %u \n", (unsigned)adjustedDstSize);
CHECK_Z( ZSTDMT_flushStream(zc, &outBuff) ); CHECK_Z( ZSTDMT_flushStream(zc, &outBuff) );
assert(outBuff.pos >= previousPos); assert(outBuff.pos >= previousPos);
DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_flushStream \n", (U32)(outBuff.pos-previousPos)); DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_flushStream \n", (unsigned)(outBuff.pos-previousPos));
} } } }
/* final frame epilogue */ /* final frame epilogue */
@ -1594,17 +1595,17 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize); size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
size_t const previousPos = outBuff.pos; size_t const previousPos = outBuff.pos;
outBuff.size = outBuff.pos + adjustedDstSize; outBuff.size = outBuff.pos + adjustedDstSize;
DISPLAYLEVEL(5, "Ending into dst buffer of size %u \n", (U32)adjustedDstSize); DISPLAYLEVEL(5, "Ending into dst buffer of size %u \n", (unsigned)adjustedDstSize);
remainingToFlush = ZSTDMT_endStream(zc, &outBuff); remainingToFlush = ZSTDMT_endStream(zc, &outBuff);
CHECK (ZSTD_isError(remainingToFlush), "ZSTDMT_endStream error : %s", ZSTD_getErrorName(remainingToFlush)); CHECK (ZSTD_isError(remainingToFlush), "ZSTDMT_endStream error : %s", ZSTD_getErrorName(remainingToFlush));
assert(outBuff.pos >= previousPos); assert(outBuff.pos >= previousPos);
DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_endStream \n", (U32)(outBuff.pos-previousPos)); DISPLAYLEVEL(6, "%u bytes flushed by ZSTDMT_endStream \n", (unsigned)(outBuff.pos-previousPos));
DISPLAYLEVEL(5, "endStream : remainingToFlush : %u \n", (U32)remainingToFlush); DISPLAYLEVEL(5, "endStream : remainingToFlush : %u \n", (unsigned)remainingToFlush);
} } } }
crcOrig = XXH64_digest(&xxhState); crcOrig = XXH64_digest(&xxhState);
cSize = outBuff.pos; cSize = outBuff.pos;
DISPLAYLEVEL(5, "Frame completed : %u bytes compressed into %u bytes \n", DISPLAYLEVEL(5, "Frame completed : %u bytes compressed into %u bytes \n",
(U32)totalTestSize, (U32)cSize); (unsigned)totalTestSize, (unsigned)cSize);
} }
/* multi - fragments decompression test */ /* multi - fragments decompression test */
@ -1625,7 +1626,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
inBuff.size = inBuff.pos + readCSrcSize; inBuff.size = inBuff.pos + readCSrcSize;
outBuff.size = outBuff.pos + dstBuffSize; outBuff.size = outBuff.pos + dstBuffSize;
DISPLAYLEVEL(6, "ZSTD_decompressStream input %u bytes into outBuff %u bytes \n", DISPLAYLEVEL(6, "ZSTD_decompressStream input %u bytes into outBuff %u bytes \n",
(U32)readCSrcSize, (U32)dstBuffSize); (unsigned)readCSrcSize, (unsigned)dstBuffSize);
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff); decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
if (ZSTD_isError(decompressionResult)) { if (ZSTD_isError(decompressionResult)) {
DISPLAY("ZSTD_decompressStream error : %s \n", ZSTD_getErrorName(decompressionResult)); DISPLAY("ZSTD_decompressStream error : %s \n", ZSTD_getErrorName(decompressionResult));
@ -1633,10 +1634,14 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest,
} }
CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult)); CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
DISPLAYLEVEL(6, "total ingested (inBuff.pos) = %u and produced (outBuff.pos) = %u \n", DISPLAYLEVEL(6, "total ingested (inBuff.pos) = %u and produced (outBuff.pos) = %u \n",
(U32)inBuff.pos, (U32)outBuff.pos); (unsigned)inBuff.pos, (unsigned)outBuff.pos);
} }
CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (U32)outBuff.pos, (U32)totalTestSize); CHECK (outBuff.pos != totalTestSize,
"decompressed data : wrong size (%u != %u)",
(unsigned)outBuff.pos, (unsigned)totalTestSize );
CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (U32)inBuff.pos, (U32)cSize); CHECK (inBuff.pos != cSize,
"compressed data should be fully read (%u != %u)",
(unsigned)inBuff.pos, (unsigned)cSize );
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0); { U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize); if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize);
CHECK (crcDest!=crcOrig, "decompressed data corrupted"); CHECK (crcDest!=crcOrig, "decompressed data corrupted");
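Round-trip integrity in these fuzzer loops is checked by hashing the input fragments with the streaming XXH64 API while they are fed to the compressor, then comparing against a one-shot hash of the regenerated output. A stand-alone sketch of that pattern follows; buffer contents and sizes are placeholders, and xxhash.h is assumed to be the xxHash header bundled with zstd.

    #include "xxhash.h"    /* the xxHash header bundled with zstd (assumption) */
    #include <assert.h>
    #include <string.h>

    static void checksumRoundTripDemo(void)
    {
        char original[256];
        char regenerated[256];
        memset(original, 'z', sizeof(original));
        memcpy(regenerated, original, sizeof(original));   /* pretend decompression output */

        /* streaming hash of the fragments actually fed to the compressor */
        XXH64_state_t* const xxhState = XXH64_createState();
        XXH64_reset(xxhState, 0);                          /* seed 0, as in the tests */
        XXH64_update(xxhState, original, 100);             /* first fragment */
        XXH64_update(xxhState, original + 100, sizeof(original) - 100);
        {   unsigned long long const crcOrig = XXH64_digest(xxhState);
            /* one-shot hash of the regenerated output */
            unsigned long long const crcDest = XXH64(regenerated, sizeof(regenerated), 0);
            assert(crcDest == crcOrig);                    /* a mismatch would mean corruption */
        }
        XXH64_freeState(xxhState);
    }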
@ -1705,7 +1710,7 @@ static size_t setCCtxParameter(ZSTD_CCtx* zc, ZSTD_CCtx_params* cctxParams,
} }
/* Tests for ZSTD_compress_generic() API */ /* Tests for ZSTD_compress_generic() API */
static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, static int fuzzerTests_newAPI(U32 seed, int nbTests, int startTest,
double compressibility, int bigTests) double compressibility, int bigTests)
{ {
U32 const maxSrcLog = bigTests ? 24 : 22; U32 const maxSrcLog = bigTests ? 24 : 22;
@ -1719,7 +1724,7 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
size_t const dstBufferSize = srcBufferSize; size_t const dstBufferSize = srcBufferSize;
BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize); BYTE* const dstBuffer = (BYTE*)malloc (dstBufferSize);
U32 result = 0; U32 result = 0;
U32 testNb = 0; int testNb = 0;
U32 coreSeed = seed; U32 coreSeed = seed;
ZSTD_CCtx* zc = ZSTD_createCCtx(); /* will be reset sometimes */ ZSTD_CCtx* zc = ZSTD_createCCtx(); /* will be reset sometimes */
ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */ ZSTD_DStream* zd = ZSTD_createDStream(); /* will be reset sometimes */
@ -1826,10 +1831,10 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
(ZSTD_maxCLevel() - (ZSTD_maxCLevel() -
(MAX(testLog, dictLog) / 2))) + (MAX(testLog, dictLog) / 2))) +
1; 1;
U32 const cLevel = MIN(cLevelCandidate, cLevelMax); int const cLevel = MIN(cLevelCandidate, cLevelMax);
DISPLAYLEVEL(5, "t%u: base cLevel : %u \n", testNb, cLevel); DISPLAYLEVEL(5, "t%i: base cLevel : %u \n", testNb, cLevel);
maxTestSize = FUZ_rLogLength(&lseed, testLog); maxTestSize = FUZ_rLogLength(&lseed, testLog);
DISPLAYLEVEL(5, "t%u: maxTestSize : %u \n", testNb, (U32)maxTestSize); DISPLAYLEVEL(5, "t%i: maxTestSize : %u \n", testNb, (unsigned)maxTestSize);
oldTestLog = testLog; oldTestLog = testLog;
/* random dictionary selection */ /* random dictionary selection */
dictSize = ((FUZ_rand(&lseed)&63)==1) ? FUZ_rLogLength(&lseed, dictLog) : 0; dictSize = ((FUZ_rand(&lseed)&63)==1) ? FUZ_rLogLength(&lseed, dictLog) : 0;
@ -1884,14 +1889,14 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
/* mess with frame parameters */ /* mess with frame parameters */
if (FUZ_rand(&lseed) & 1) { if (FUZ_rand(&lseed) & 1) {
U32 const checksumFlag = FUZ_rand(&lseed) & 1; int const checksumFlag = FUZ_rand(&lseed) & 1;
DISPLAYLEVEL(5, "t%u: frame checksum : %u \n", testNb, checksumFlag); DISPLAYLEVEL(5, "t%u: frame checksum : %u \n", testNb, checksumFlag);
CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_checksumFlag, checksumFlag, opaqueAPI) ); CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_checksumFlag, checksumFlag, opaqueAPI) );
} }
if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_dictIDFlag, FUZ_rand(&lseed) & 1, opaqueAPI) ); if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_dictIDFlag, FUZ_rand(&lseed) & 1, opaqueAPI) );
if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_contentSizeFlag, FUZ_rand(&lseed) & 1, opaqueAPI) ); if (FUZ_rand(&lseed) & 1) CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_contentSizeFlag, FUZ_rand(&lseed) & 1, opaqueAPI) );
if (FUZ_rand(&lseed) & 1) { if (FUZ_rand(&lseed) & 1) {
DISPLAYLEVEL(5, "t%u: pledgedSrcSize : %u \n", testNb, (U32)pledgedSrcSize); DISPLAYLEVEL(5, "t%u: pledgedSrcSize : %u \n", testNb, (unsigned)pledgedSrcSize);
CHECK_Z( ZSTD_CCtx_setPledgedSrcSize(zc, pledgedSrcSize) ); CHECK_Z( ZSTD_CCtx_setPledgedSrcSize(zc, pledgedSrcSize) );
} }
@ -1899,8 +1904,8 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
if (bigTests || (FUZ_rand(&lseed) & 0xF) == 0xF) { if (bigTests || (FUZ_rand(&lseed) & 0xF) == 0xF) {
U32 const nbThreadsCandidate = (FUZ_rand(&lseed) & 4) + 1; U32 const nbThreadsCandidate = (FUZ_rand(&lseed) & 4) + 1;
U32 const nbThreadsAdjusted = (windowLogMalus < nbThreadsCandidate) ? nbThreadsCandidate - windowLogMalus : 1; U32 const nbThreadsAdjusted = (windowLogMalus < nbThreadsCandidate) ? nbThreadsCandidate - windowLogMalus : 1;
U32 const nbThreads = MIN(nbThreadsAdjusted, nbThreadsMax); int const nbThreads = MIN(nbThreadsAdjusted, nbThreadsMax);
DISPLAYLEVEL(5, "t%u: nbThreads : %u \n", testNb, nbThreads); DISPLAYLEVEL(5, "t%i: nbThreads : %u \n", testNb, nbThreads);
CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_nbWorkers, nbThreads, opaqueAPI) ); CHECK_Z( setCCtxParameter(zc, cctxParams, ZSTD_c_nbWorkers, nbThreads, opaqueAPI) );
if (nbThreads > 1) { if (nbThreads > 1) {
U32 const jobLog = FUZ_rand(&lseed) % (testLog+1); U32 const jobLog = FUZ_rand(&lseed) % (testLog+1);
@ -1958,7 +1963,7 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, flush) ); CHECK_Z( ZSTD_compressStream2(zc, &outBuff, &inBuff, flush) );
DISPLAYLEVEL(6, "t%u: compress consumed %u bytes (total : %u) ; flush: %u (total : %u) \n", DISPLAYLEVEL(6, "t%u: compress consumed %u bytes (total : %u) ; flush: %u (total : %u) \n",
testNb, (U32)inBuff.pos, (U32)(totalTestSize + inBuff.pos), (U32)flush, (U32)outBuff.pos); testNb, (unsigned)inBuff.pos, (unsigned)(totalTestSize + inBuff.pos), (unsigned)flush, (unsigned)outBuff.pos);
XXH64_update(&xxhState, srcBuffer+srcStart, inBuff.pos); XXH64_update(&xxhState, srcBuffer+srcStart, inBuff.pos);
memcpy(copyBuffer+totalTestSize, srcBuffer+srcStart, inBuff.pos); memcpy(copyBuffer+totalTestSize, srcBuffer+srcStart, inBuff.pos);
@ -1972,9 +1977,9 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog+1); size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog+1);
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize); size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
outBuff.size = outBuff.pos + adjustedDstSize; outBuff.size = outBuff.pos + adjustedDstSize;
DISPLAYLEVEL(6, "t%u: End-flush into dst buffer of size %u \n", testNb, (U32)adjustedDstSize); DISPLAYLEVEL(6, "t%u: End-flush into dst buffer of size %u \n", testNb, (unsigned)adjustedDstSize);
remainingToFlush = ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end); remainingToFlush = ZSTD_compressStream2(zc, &outBuff, &inBuff, ZSTD_e_end);
DISPLAYLEVEL(6, "t%u: Total flushed so far : %u bytes \n", testNb, (U32)outBuff.pos); DISPLAYLEVEL(6, "t%u: Total flushed so far : %u bytes \n", testNb, (unsigned)outBuff.pos);
CHECK( ZSTD_isError(remainingToFlush), CHECK( ZSTD_isError(remainingToFlush),
"ZSTD_compressStream2 w/ ZSTD_e_end error : %s", "ZSTD_compressStream2 w/ ZSTD_e_end error : %s",
ZSTD_getErrorName(remainingToFlush) ); ZSTD_getErrorName(remainingToFlush) );
@ -1988,7 +1993,7 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
/* multi - fragments decompression test */ /* multi - fragments decompression test */
if (!dictSize /* don't reset if dictionary : could be different */ && (FUZ_rand(&lseed) & 1)) { if (!dictSize /* don't reset if dictionary : could be different */ && (FUZ_rand(&lseed) & 1)) {
DISPLAYLEVEL(5, "resetting DCtx (dict:%08X) \n", (U32)(size_t)dict); DISPLAYLEVEL(5, "resetting DCtx (dict:%p) \n", dict);
CHECK_Z( ZSTD_resetDStream(zd) ); CHECK_Z( ZSTD_resetDStream(zd) );
} else { } else {
if (dictSize) if (dictSize)
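The change from "dict:%08X" with (U32)(size_t)dict to "%p" with the bare pointer deserves a note: forcing a pointer through U32 throws away the upper half of the address on 64-bit targets, and the result still is not the unsigned int that %X expects. A tiny illustration with plain printf and a hypothetical buffer:

    #include <stdio.h>

    int main(void)
    {
        char buffer[16];
        void* const dict = buffer;

        /* printf("dict:%08X\n", (unsigned)(size_t)dict);   <-- compiles, but keeps only
         *                                                      the low 32 bits of the address */
        printf("dict:%p\n", dict);    /* %p prints the whole pointer, portably */
        return 0;
    }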
@ -2005,19 +2010,19 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
inBuff.size = inBuff.pos + readCSrcSize; inBuff.size = inBuff.pos + readCSrcSize;
outBuff.size = outBuff.pos + dstBuffSize; outBuff.size = outBuff.pos + dstBuffSize;
DISPLAYLEVEL(6, "decompression presented %u new bytes (pos:%u/%u)\n", DISPLAYLEVEL(6, "decompression presented %u new bytes (pos:%u/%u)\n",
(U32)readCSrcSize, (U32)inBuff.pos, (U32)cSize); (unsigned)readCSrcSize, (unsigned)inBuff.pos, (unsigned)cSize);
decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff); decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
DISPLAYLEVEL(6, "so far: consumed = %u, produced = %u \n", DISPLAYLEVEL(6, "so far: consumed = %u, produced = %u \n",
(U32)inBuff.pos, (U32)outBuff.pos); (unsigned)inBuff.pos, (unsigned)outBuff.pos);
if (ZSTD_isError(decompressionResult)) { if (ZSTD_isError(decompressionResult)) {
DISPLAY("ZSTD_decompressStream error : %s \n", ZSTD_getErrorName(decompressionResult)); DISPLAY("ZSTD_decompressStream error : %s \n", ZSTD_getErrorName(decompressionResult));
findDiff(copyBuffer, dstBuffer, totalTestSize); findDiff(copyBuffer, dstBuffer, totalTestSize);
} }
CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult)); CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
CHECK (inBuff.pos > cSize, "ZSTD_decompressStream consumes too much input : %u > %u ", (U32)inBuff.pos, (U32)cSize); CHECK (inBuff.pos > cSize, "ZSTD_decompressStream consumes too much input : %u > %u ", (unsigned)inBuff.pos, (unsigned)cSize);
} }
CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (U32)inBuff.pos, (U32)cSize); CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (unsigned)inBuff.pos, (unsigned)cSize);
CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (U32)outBuff.pos, (U32)totalTestSize); CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (unsigned)outBuff.pos, (unsigned)totalTestSize);
{ U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0); { U64 const crcDest = XXH64(dstBuffer, totalTestSize, 0);
if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize); if (crcDest!=crcOrig) findDiff(copyBuffer, dstBuffer, totalTestSize);
CHECK (crcDest!=crcOrig, "decompressed data corrupted"); CHECK (crcDest!=crcOrig, "decompressed data corrupted");
@ -2214,7 +2219,7 @@ int main(int argc, const char** argv)
seed = h % 10000; seed = h % 10000;
} }
DISPLAY("Seed = %u\n", seed); DISPLAY("Seed = %u\n", (unsigned)seed);
if (proba!=FUZ_COMPRESSIBILITY_DEFAULT) DISPLAY("Compressibility : %i%%\n", proba); if (proba!=FUZ_COMPRESSIBILITY_DEFAULT) DISPLAY("Compressibility : %i%%\n", proba);
if (nbTests<=0) nbTests=1; if (nbTests<=0) nbTests=1;
View File
@ -104,7 +104,7 @@ static clock_t g_time = 0;
/* ************************************* /* *************************************
* Benchmark Parameters * Benchmark Parameters
***************************************/ ***************************************/
static U32 g_nbIterations = NBLOOPS; static unsigned g_nbIterations = NBLOOPS;
static size_t g_blockSize = 0; static size_t g_blockSize = 0;
int g_additionalParam = 0; int g_additionalParam = 0;
@ -121,7 +121,7 @@ void BMK_SetNbIterations(unsigned nbLoops)
void BMK_SetBlockSize(size_t blockSize) void BMK_SetBlockSize(size_t blockSize)
{ {
g_blockSize = blockSize; g_blockSize = blockSize;
DISPLAYLEVEL(2, "using blocks of size %u KB \n", (U32)(blockSize>>10)); DISPLAYLEVEL(2, "using blocks of size %u KB \n", (unsigned)(blockSize>>10));
} }
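The bench changes above bring module-level state in line with the unsigned parameter its public setter already declares, which is exactly the header/implementation drift this commit is about. A hypothetical miniature of the intended pattern, with made-up DEMO_ names:

    /* bench_demo.h (hypothetical) : the public prototype uses unsigned */
    void DEMO_setNbIterations(unsigned nbLoops);

    /* bench_demo.c (hypothetical) : the state it feeds uses the same type, not U32 */
    static unsigned g_demoNbIterations = 3;

    void DEMO_setNbIterations(unsigned nbLoops)
    {
        g_demoNbIterations = nbLoops;
    }

    unsigned DEMO_getNbIterations(void)
    {
        return g_demoNbIterations;
    }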
@ -222,7 +222,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
} }
/* Compression */ /* Compression */
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)srcSize); DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize);
if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */ if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */ UTIL_sleepMilli(1); /* give processor time to other processes */
@ -371,7 +371,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
ratio = (double)srcSize / (double)cSize; ratio = (double)srcSize / (double)cSize;
markNb = (markNb+1) % NB_MARKS; markNb = (markNb+1) % NB_MARKS;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r", DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r",
marks[markNb], displayName, (U32)srcSize, (U32)cSize, ratio, marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
(double)srcSize / fastestC ); (double)srcSize / fastestC );
(void)fastestD; (void)crcOrig; /* unused when decompression disabled */ (void)fastestD; (void)crcOrig; /* unused when decompression disabled */
@ -389,7 +389,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
ZSTD_DDict* ddict = ZSTD_createDDict(dictBuffer, dictBufferSize); ZSTD_DDict* ddict = ZSTD_createDDict(dictBuffer, dictBufferSize);
if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure"); if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure");
do { do {
U32 blockNb; unsigned blockNb;
for (blockNb=0; blockNb<nbBlocks; blockNb++) { for (blockNb=0; blockNb<nbBlocks; blockNb++) {
size_t const regenSize = ZSTD_decompress_usingDDict(dctx, size_t const regenSize = ZSTD_decompress_usingDDict(dctx,
blockTable[blockNb].resPtr, blockTable[blockNb].srcSize, blockTable[blockNb].resPtr, blockTable[blockNb].srcSize,
@ -510,7 +510,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
markNb = (markNb+1) % NB_MARKS; markNb = (markNb+1) % NB_MARKS;
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r", DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r",
marks[markNb], displayName, (U32)srcSize, (U32)cSize, ratio, marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
(double)srcSize / fastestC, (double)srcSize / fastestC,
(double)srcSize / fastestD ); (double)srcSize / fastestD );
@ -521,9 +521,9 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck); DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck);
for (u=0; u<srcSize; u++) { for (u=0; u<srcSize; u++) {
if (((const BYTE*)srcBuffer)[u] != ((const BYTE*)resultBuffer)[u]) { if (((const BYTE*)srcBuffer)[u] != ((const BYTE*)resultBuffer)[u]) {
U32 segNb, bNb, pos; unsigned segNb, bNb, pos;
size_t bacc = 0; size_t bacc = 0;
DISPLAY("Decoding error at pos %u ", (U32)u); DISPLAY("Decoding error at pos %u ", (unsigned)u);
for (segNb = 0; segNb < nbBlocks; segNb++) { for (segNb = 0; segNb < nbBlocks; segNb++) {
if (bacc + blockTable[segNb].srcSize > u) break; if (bacc + blockTable[segNb].srcSize > u) break;
bacc += blockTable[segNb].srcSize; bacc += blockTable[segNb].srcSize;
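When a decoding mismatch is detected, the loop above accumulates blockTable[segNb].srcSize until it reaches the block containing the faulty offset. Pulled out into a stand-alone helper (hypothetical name, plain size_t sizes), the same search reads as follows:

    #include <stddef.h>

    /* Returns the index of the block containing global offset pos,
     * and stores the offset local to that block into *posInBlock.
     * blockSizes[] holds the source size of each of the nbBlocks blocks. */
    static unsigned findBlockForOffset(size_t pos,
                                       const size_t* blockSizes, unsigned nbBlocks,
                                       size_t* posInBlock)
    {
        size_t bacc = 0;      /* bytes accumulated so far, like the test's bacc */
        unsigned segNb;
        for (segNb = 0; segNb < nbBlocks; segNb++) {
            if (bacc + blockSizes[segNb] > pos) break;
            bacc += blockSizes[segNb];
        }
        *posInBlock = pos - bacc;
        return segNb;         /* == nbBlocks if pos is past the end */
    }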
@ -594,7 +594,9 @@ static void BMK_benchCLevel(void* srcBuffer, size_t benchedSize,
SET_REALTIME_PRIORITY; SET_REALTIME_PRIORITY;
if (g_displayLevel == 1 && !g_additionalParam) if (g_displayLevel == 1 && !g_additionalParam)
DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING, (U32)benchedSize, g_nbIterations, (U32)(g_blockSize>>10)); DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n",
ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING,
(unsigned)benchedSize, g_nbIterations, (unsigned)(g_blockSize>>10));
if (cLevelLast < cLevel) cLevelLast = cLevel; if (cLevelLast < cLevel) cLevelLast = cLevel;
@ -726,7 +728,7 @@ static void BMK_benchFileTable(const char** fileNamesTable, unsigned nbFiles,
dictBufferSize = (size_t)dictFileSize; dictBufferSize = (size_t)dictFileSize;
dictBuffer = malloc(dictBufferSize); dictBuffer = malloc(dictBufferSize);
if (dictBuffer==NULL) if (dictBuffer==NULL)
EXM_THROW(11, "not enough memory for dictionary (%u bytes)", (U32)dictBufferSize); EXM_THROW(11, "not enough memory for dictionary (%u bytes)", (unsigned)dictBufferSize);
BMK_loadFiles(dictBuffer, dictBufferSize, fileSizes, &dictFileName, 1); BMK_loadFiles(dictBuffer, dictBufferSize, fileSizes, &dictFileName, 1);
} }
@ -734,7 +736,7 @@ static void BMK_benchFileTable(const char** fileNamesTable, unsigned nbFiles,
benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3; benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3;
if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad; if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
if (benchedSize < totalSizeToLoad) if (benchedSize < totalSizeToLoad)
DISPLAY("Not enough memory; testing %u MB only...\n", (U32)(benchedSize >> 20)); DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
srcBuffer = malloc(benchedSize + !benchedSize); srcBuffer = malloc(benchedSize + !benchedSize);
if (!srcBuffer) EXM_THROW(12, "not enough memory"); if (!srcBuffer) EXM_THROW(12, "not enough memory");
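The final hunk sizes the benchmark buffer to whatever memory is actually available and warns, through the usual (unsigned) cast, when the input has to be truncated. A compact sketch of that sizing logic, with a stub standing in for the real BMK_findMaxMem, might look like this:

    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long long U64_like;

    /* Placeholder : the real BMK_findMaxMem() probes malloc() for the largest workable size */
    static size_t findMaxMemStub(U64_like requested) { (void)requested; return (size_t)512 << 20; }

    size_t sizeBenchBuffer(U64_like totalSizeToLoad)
    {
        /* ask with 3x head-room, then keep a third of what is actually available */
        size_t benchedSize = findMaxMemStub(totalSizeToLoad * 3) / 3;
        if ((U64_like)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
        if (benchedSize < totalSizeToLoad)
            printf("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
        return benchedSize;
    }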