Merge pull request #923 from lz4/fix784
fix efficiency of LZ4_compress_HC_destSize()
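For context, LZ4_compress_HC_destSize() is the destination-bounded entry point of the HC compressor: it fills a fixed dst budget as completely as it can and reports through *srcSizePtr how many input bytes were actually consumed. The changes below are about filling that budget more completely (the fix784 branch addresses issue #784). A minimal usage sketch, my own illustration rather than code from this patch (buffer sizes and compression level are arbitrary):

#include <stdio.h>
#include <string.h>
#include "lz4hc.h"   /* LZ4_compress_HC_destSize, LZ4_createStreamHC, LZ4HC_CLEVEL_DEFAULT */

int main(void)
{
    static char src[64 * 1024];
    static char dst[4096];              /* hard output budget : 4 KB */
    int srcSize = (int)sizeof(src);     /* in : bytes available ; out : bytes actually consumed */
    memset(src, 'A', sizeof(src));      /* trivially compressible payload */

    {   LZ4_streamHC_t* const ctx = LZ4_createStreamHC();
        if (ctx == NULL) return 1;
        {   int const cSize = LZ4_compress_HC_destSize(ctx, src, dst, &srcSize,
                                                       (int)sizeof(dst), LZ4HC_CLEVEL_DEFAULT);
            printf("consumed %d input bytes, wrote %d bytes into a %d-byte budget \n",
                   srcSize, cSize, (int)sizeof(dst));
            /* dst[0..cSize) is a regular LZ4 block : decode it with LZ4_decompress_safe() */
        }
        LZ4_freeStreamHC(ctx);
    }
    return 0;
}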
commit 78f4fdbb89

lib/lz4.c (24 changed lines)
@@ -247,6 +247,7 @@ static const int LZ4_minLength = (MFLIMIT+1);
 /*-************************************
 * Types
 **************************************/
+#include <limits.h>
 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
 # include <stdint.h>
 typedef uint8_t BYTE;
@@ -256,7 +257,6 @@ static const int LZ4_minLength = (MFLIMIT+1);
 typedef uint64_t U64;
 typedef uintptr_t uptrval;
 #else
-# include <limits.h>
 # if UINT_MAX != 4294967295UL
 # error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
 # endif
@@ -1185,13 +1185,14 @@ _last_literals:
 if (outputDirective == fillOutput) {
 /* adapt lastRun to fill 'dst' */
 assert(olimit >= op);
-lastRun = (size_t)(olimit-op) - 1;
-lastRun -= (lastRun+240)/255;
+lastRun = (size_t)(olimit-op) - 1/*token*/;
+lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
 } else {
 assert(outputDirective == limitedOutput);
 return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
 }
 }
+DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
 if (lastRun >= RUN_MASK) {
 size_t accumulator = lastRun - RUN_MASK;
 *op++ = RUN_MASK << ML_BITS;
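The substance of this hunk is the divisor used when shrinking the final literal run to fit the remaining dst space. A run of L literals costs 1 token byte, the L literals themselves, and, once L reaches RUN_MASK (15), 1 + (L - 15)/255 extra length bytes. Since each extra length byte also comes out of the same budget, the estimate should subtract one byte per 256 remaining bytes rather than per 255; the old /255 estimate could give away a usable byte of dst. The standalone check below is my own sketch (not part of the patch; RUN_MASK mirrors lz4's value of 15): it replays both formulas over a sweep of budgets, asserts the chosen run always fits, and counts how many budgets each variant fills exactly.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define RUN_MASK 15   /* 4-bit literal-length field in the LZ4 token */

/* bytes needed to store a literal run of `len` bytes : token + literals + extra length bytes */
static size_t literal_run_cost(size_t len)
{
    size_t cost = 1 + len;
    if (len >= RUN_MASK) cost += 1 + (len - RUN_MASK) / 255;
    return cost;
}

int main(void)
{
    size_t budget;
    size_t exact_old = 0, exact_new = 0;
    for (budget = 2; budget < 100000; budget++) {
        size_t const avail  = budget - 1;                             /* reserve the token, as the patch does */
        size_t const runOld = avail - (avail + 240) / 255;            /* previous formula */
        size_t const runNew = avail - (avail + 256 - RUN_MASK) / 256; /* formula introduced by this commit */
        assert(literal_run_cost(runOld) <= budget);
        assert(literal_run_cost(runNew) <= budget);
        assert(runNew >= runOld);            /* the new choice never stores fewer literals */
        exact_old += (literal_run_cost(runOld) == budget);
        exact_new += (literal_run_cost(runNew) == budget);
    }
    printf("budgets filled exactly : old=%zu new=%zu\n", exact_old, exact_new);
    return 0;
}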
@@ -1824,10 +1825,10 @@ LZ4_decompress_generic(
 length = token & ML_MASK;

 if (length == ML_MASK) {
-variable_length_error error = ok;
-if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
-length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
-if (error != ok) { goto _output_error; }
+variable_length_error error = ok;
+if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
+if (error != ok) { goto _output_error; }
 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
 length += MINMATCH;
 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
@@ -1853,7 +1854,7 @@ LZ4_decompress_generic(
 continue;
 } } }

-if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
 /* match starting within external dictionary */
 if ((dict==usingExtDict) && (match < lowPrefix)) {
 if (unlikely(op+length > oend-LASTLITERALS)) {
@@ -2003,7 +2004,12 @@ LZ4_decompress_generic(
 /* We must be on the last sequence (or invalid) because of the parsing limitations
 * so check that we exactly consume the input and don't overrun the output buffer.
 */
-if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) { goto _output_error; }
+if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
+DEBUGLOG(6, "should have been last run of literals")
+DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+goto _output_error;
+}
 }
 memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
 ip += length;
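The decoder-side edits in this file are purely diagnostic: the one-line error exit gains DEBUGLOG(6, ...) traces so a mismatch between consumed input and produced output can be pinpointed at high verbosity. In lz4 these statements are keyed on the LZ4_DEBUG build macro and compile to nothing otherwise. The snippet below sketches the general shape of such a level-gated logging macro; it is my own illustration, not lz4's exact definition.

#include <stdio.h>

#ifndef DEMO_DEBUG_LEVEL
#  define DEMO_DEBUG_LEVEL 0        /* hypothetical verbosity knob for this sketch */
#endif

#if DEMO_DEBUG_LEVEL > 0
#  define DEMO_LOG(lvl, ...)                                 \
      do { if ((lvl) <= DEMO_DEBUG_LEVEL) {                  \
               fprintf(stderr, __VA_ARGS__);                 \
               fprintf(stderr, " \n");                       \
      }   } while (0)
#else
#  define DEMO_LOG(lvl, ...) do { } while (0)   /* disappears entirely in release builds */
#endif

int main(void)
{
    DEMO_LOG(6, "this only prints when DEMO_DEBUG_LEVEL >= %i", 6);
    return 0;
}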

lib/lz4hc.c (123 changed lines)
@@ -481,7 +481,7 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
 U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
 if (start==NULL) start = *anchor; /* only works for single segment */
 /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
-DEBUGLOG(6, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u",
+DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
 pos,
 (U32)(*ip - *anchor), matchLength, (U32)(*ip-match),
 cost, totalCost);
@@ -490,7 +490,13 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (

 /* Encode Literal length */
 length = (size_t)(*ip - *anchor);
-if ((limit) && ((*op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1; /* Check output limit */
+LZ4_STATIC_ASSERT(notLimited == 0);
+/* Check output limit */
+if (limit && ((*op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+(int)length, (int)(oend-*op));
+return 1;
+}
 if (length >= RUN_MASK) {
 size_t len = length - RUN_MASK;
 *token = (RUN_MASK << ML_BITS);
@@ -511,7 +517,10 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
 /* Encode MatchLength */
 assert(matchLength >= MINMATCH);
 length = (size_t)matchLength - MINMATCH;
-if ((limit) && (*op + (length / 255) + (1 + LASTLITERALS) > oend)) return 1; /* Check output limit */
+if (limit && (*op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+DEBUGLOG(6, "Not enough room to write match length");
+return 1; /* Check output limit */
+}
 if (length >= ML_MASK) {
 *token += ML_MASK;
 length -= ML_MASK;
@@ -565,7 +574,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
 /* init */
 *srcSizePtr = 0;
 if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
-if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */

 /* Main Loop */
 while (ip <= mflimit) {
@@ -637,7 +646,11 @@ _Search3:
 if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
 ip = start2;
 optr = op;
-if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) goto _dest_overflow;
+if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+ml = ml2;
+ref = ref2;
+goto _dest_overflow;
+}
 continue;
 }

@@ -709,17 +722,18 @@ _Search3:
 _last_literals:
 /* Encode Last Literals */
 { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
-size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-size_t const totalSize = 1 + litLength + lastRunSize;
+size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+size_t const totalSize = 1 + llAdd + lastRunSize;
 if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
 if (limit && (op + totalSize > oend)) {
-if (limit == limitedOutput) return 0; /* Check output limit */
+if (limit == limitedOutput) return 0;
 /* adapt lastRunSize to fill 'dest' */
-lastRunSize = (size_t)(oend - op) - 1;
-litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-lastRunSize -= litLength;
+lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+lastRunSize -= llAdd;
 }
-ip = anchor + lastRunSize;
+DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */

 if (lastRunSize >= RUN_MASK) {
 size_t accumulator = lastRunSize - RUN_MASK;
@@ -739,9 +753,25 @@ _last_literals:

 _dest_overflow:
 if (limit == fillOutput) {
+/* Assumption : ip, anchor, ml and ref must be set correctly */
+size_t const ll = (size_t)(ip - anchor);
+size_t const ll_addbytes = (ll + 240) / 255;
+size_t const ll_totalCost = 1 + ll_addbytes + ll;
+BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+DEBUGLOG(6, "Last sequence overflowing");
 op = optr; /* restore correct out pointer */
+if (op + ll_totalCost <= maxLitPos) {
+/* ll validated; now adjust match length */
+size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+assert(maxMlSize < INT_MAX); assert(ml >= 0);
+if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
+if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
+LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+} }
 goto _last_literals;
 }
 /* compression failed */
 return 0;
 }
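The bound maxMlSize = MINMATCH + (ML_MASK-1) + bytesLeftForMl*255 used in the new _dest_overflow block follows from the matchlength encoding: with N continuation bytes to spare the largest encodable match is MINMATCH + (ML_MASK-1) + 255*N, because for N = 0 the token's 4-bit field must stay below ML_MASK, and each further byte adds at most 255 (the last one staying below 255). The check below is my own sketch (not part of the patch; constants mirror lz4's MINMATCH=4 and ML_MASK=15) and confirms the bound is tight for small budgets.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MINMATCH 4
#define ML_MASK  15   /* 4-bit matchlength field in the token */

/* continuation bytes written after the token for a match of length ml (ml >= MINMATCH) */
static size_t ml_extra_bytes(size_t ml)
{
    size_t len = ml - MINMATCH;
    if (len < ML_MASK) return 0;     /* fits entirely in the token */
    len -= ML_MASK;
    return len / 255 + 1;            /* 255-valued bytes plus one terminating byte */
}

int main(void)
{
    size_t budget;
    for (budget = 0; budget < 64; budget++) {
        size_t const maxMlSize = MINMATCH + (ML_MASK - 1) + budget * 255;
        assert(ml_extra_bytes(maxMlSize) <= budget);          /* the bound is reachable */
        assert(ml_extra_bytes(maxMlSize + 1) == budget + 1);  /* one more length byte would be needed */
    }
    printf("maxMlSize bound verified for 0..63 spare bytes\n");
    return 0;
}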
@@ -788,7 +818,8 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
 { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
 };

-DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d)", ctx, src, *srcSizePtr);
+DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ctx, src, *srcSizePtr, limit);

 if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
 if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
@@ -1075,8 +1106,8 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
 limitedOutput_directive limit)
 {
 LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
-DEBUGLOG(4, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d)",
-LZ4_streamHCPtr, src, *srcSizePtr);
+DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+LZ4_streamHCPtr, src, *srcSizePtr, limit);
 assert(ctxPtr != NULL);
 /* auto-init if forgotten */
 if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
@@ -1303,6 +1334,8 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
 BYTE* op = (BYTE*) dst;
 BYTE* opSaved = (BYTE*) dst;
 BYTE* oend = op + dstCapacity;
+int ovml = MINMATCH; /* overflow - last sequence */
+const BYTE* ovref = NULL;

 /* init */
 #ifdef LZ4HC_HEAPMODE
@@ -1328,8 +1361,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
 int const firstML = firstMatch.len;
 const BYTE* const matchPos = ip - firstMatch.off;
 opSaved = op;
-if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) /* updates ip, op and anchor */
+if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
+ovml = firstML;
+ovref = matchPos;
 goto _dest_overflow;
+}
 continue;
 }

@@ -1471,7 +1507,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
 best_off = opt[last_match_pos].off;
 cur = last_match_pos - best_mlen;

-encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+encode: /* cur, last_match_pos, best_mlen, best_off must be set */
 assert(cur < LZ4_OPT_NUM);
 assert(last_match_pos >= 1); /* == 1 when only one candidate */
 DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
@@ -1501,16 +1537,18 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
 assert(ml >= MINMATCH);
 assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
 opSaved = op;
-if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) /* updates ip, op and anchor */
+if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
+ovml = ml;
+ovref = ip - offset;
 goto _dest_overflow;
-} }
+} } }
 } /* while (ip <= mflimit) */

-_last_literals:
+_last_literals:
 /* Encode Last Literals */
 { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
-size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-size_t const totalSize = 1 + litLength + lastRunSize;
+size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+size_t const totalSize = 1 + llAdd + lastRunSize;
 if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
 if (limit && (op + totalSize > oend)) {
 if (limit == limitedOutput) { /* Check output limit */
@@ -1518,11 +1556,12 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
 goto _return_label;
 }
 /* adapt lastRunSize to fill 'dst' */
-lastRunSize = (size_t)(oend - op) - 1;
-litLength = (lastRunSize + 255 - RUN_MASK) / 255;
-lastRunSize -= litLength;
+lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+lastRunSize -= llAdd;
 }
-ip = anchor + lastRunSize;
+DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */

 if (lastRunSize >= RUN_MASK) {
 size_t accumulator = lastRunSize - RUN_MASK;
@@ -1541,14 +1580,32 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
 retval = (int) ((char*)op-dst);
 goto _return_label;

-_dest_overflow:
-if (limit == fillOutput) {
-op = opSaved; /* restore correct out pointer */
-goto _last_literals;
-}
-_return_label:
+_dest_overflow:
+if (limit == fillOutput) {
+/* Assumption : ip, anchor, ovml and ovref must be set correctly */
+size_t const ll = (size_t)(ip - anchor);
+size_t const ll_addbytes = (ll + 240) / 255;
+size_t const ll_totalCost = 1 + ll_addbytes + ll;
+BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
+op = opSaved; /* restore correct out pointer */
+if (op + ll_totalCost <= maxLitPos) {
+/* ll validated; now adjust match length */
+size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+assert(maxMlSize < INT_MAX); assert(ovml >= 0);
+if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
+if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
+DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
+DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
+LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
+} }
+goto _last_literals;
+}
+_return_label:
 #ifdef LZ4HC_HEAPMODE
 free(opt);
 #endif
 return retval;
 }
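Unlike the hashChain path, the optimal parser does not have the pending match in ml/ref scope when control reaches _dest_overflow, hence the new ovml/ovref pair: every encode call that can jump there first records the match it was about to emit, and the handler re-emits a shortened version when the remaining room allows it. The helper below condenses that decision into one standalone function; it is my own sketch with hypothetical names, not lz4 code. Here `remaining` stands for oend - op at the overflow point (with oend already trimmed by LASTLITERALS, as in fillOutput mode), `ll` for the pending literals, `ml` for the pending match length; the constants mirror lz4's.

#include <stddef.h>
#include <stdio.h>

#define MINMATCH      4
#define ML_MASK       15
#define LASTLITERALS  5
#define MFLIMIT       12

/* Returns the (possibly truncated) match length that can still be emitted together
 * with `ll` pending literals into `remaining` output bytes, or 0 if nothing fits.
 * Mirrors the arithmetic of the new _dest_overflow blocks. */
static size_t truncated_match_len(size_t remaining, size_t ll, size_t ml)
{
    size_t const ll_addbytes  = (ll + 240) / 255;        /* literal-length continuation bytes */
    size_t const ll_totalCost = 1 + ll_addbytes + ll;    /* token + extension bytes + literals */
    if (remaining < 3) return 0;
    {   size_t const maxLitPos = remaining - 3;          /* keep 2 bytes for the offset + 1 for the token */
        if (ll_totalCost > maxLitPos) return 0;          /* not even the literals fit */
        {   size_t const bytesLeftForMl = maxLitPos - ll_totalCost;
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + bytesLeftForMl * 255;
            if (ml > maxMlSize) ml = maxMlSize;          /* truncate the pending match */
            /* same final guard as the patch before re-emitting (end-of-block restrictions) */
            if ((remaining + LASTLITERALS) - (ll_totalCost + 2) - 1 + ml < MFLIMIT) return 0;
            return ml;
        }
    }
}

int main(void)
{
    printf("remaining=20, ll=10, ml=10000 -> emit ml=%zu\n", truncated_match_len(20, 10, 10000));
    printf("remaining=12, ll=10, ml=10000 -> emit ml=%zu\n", truncated_match_len(12, 10, 10000));
    return 0;
}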

tests/frametest.c
@@ -815,7 +815,8 @@ size_t test_lz4f_decompression_wBuffers(
 U64 crcOrig,
 U32* const randState,
 LZ4F_dctx* const dCtx,
-U32 seed, U32 testNb)
+U32 seed, U32 testNb,
+int findErrorPos)
 {
 const BYTE* ip = (const BYTE*)cSrc;
 const BYTE* const iend = ip + cSize;
@@ -863,8 +864,8 @@ size_t test_lz4f_decompression_wBuffers(
 op[oSizeMax], mark);
 }
 if (LZ4F_getErrorCode(moreToFlush) == LZ4F_ERROR_contentChecksum_invalid) {
-DISPLAYLEVEL(2, "checksum error detected \n");
-locateBuffDiff(srcRef, dst, decompressedSize, o_scenario);
+if (findErrorPos) DISPLAYLEVEL(2, "checksum error detected \n");
+if (findErrorPos) locateBuffDiff(srcRef, dst, decompressedSize, o_scenario);
 }
 if (LZ4F_isError(moreToFlush)) return moreToFlush;

@@ -885,7 +886,7 @@ size_t test_lz4f_decompression_wBuffers(
 if (totalOut) { /* otherwise, it's a skippable frame */
 U64 const crcDecoded = XXH64_digest(&xxh64);
 if (crcDecoded != crcOrig) {
-locateBuffDiff(srcRef, dst, decompressedSize, o_scenario);
+if (findErrorPos) locateBuffDiff(srcRef, dst, decompressedSize, o_scenario);
 return LZ4F_ERROR_contentChecksum_invalid;
 } }
 return 0;
@@ -897,7 +898,8 @@ size_t test_lz4f_decompression(const void* cSrc, size_t cSize,
 U64 crcOrig,
 U32* const randState,
 LZ4F_dctx* const dCtx,
-U32 seed, U32 testNb)
+U32 seed, U32 testNb,
+int findErrorPos)
 {
 o_scenario_e const o_scenario = (o_scenario_e)(FUZ_rand(randState) % 3); /* 0 : contiguous; 1 : non-contiguous; 2 : dst overwritten */
 /* tighten dst buffer conditions */
@@ -914,7 +916,7 @@ size_t test_lz4f_decompression(const void* cSrc, size_t cSize,
 crcOrig,
 randState,
 dCtx,
-seed, testNb);
+seed, testNb, findErrorPos);

 free(dstBuffer);
 return result;
@@ -1068,7 +1070,7 @@ int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double compressi

 /* multi-segments decompression */
 DISPLAYLEVEL(6, "normal decompression \n");
-{ size_t result = test_lz4f_decompression(compressedBuffer, cSize, srcStart, srcSize, crcOrig, &randState, dCtx, seed, testNb);
+{ size_t result = test_lz4f_decompression(compressedBuffer, cSize, srcStart, srcSize, crcOrig, &randState, dCtx, seed, testNb, 1 /*findError*/ );
 CHECK (LZ4F_isError(result), "multi-segment decompression failed (error %i => %s)",
 (int)result, LZ4F_getErrorName(result));
 }
@@ -1098,7 +1100,7 @@ int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double compressi

 /* test decompression on noisy src */
 DISPLAYLEVEL(6, "noisy decompression \n");
-test_lz4f_decompression(compressedBuffer, cSize, srcStart, srcSize, crcOrig, &randState, dCtxNoise, seed, testNb);
+test_lz4f_decompression(compressedBuffer, cSize, srcStart, srcSize, crcOrig, &randState, dCtxNoise, seed, testNb, 0 /*don't search error Pos*/ );
 /* note : we don't analyze result here : it probably failed, which is expected.
 * The sole purpose is to catch potential out-of-bound reads and writes. */
 LZ4F_resetDecompressionContext(dCtxNoise); /* context must be reset after an error */

tests/fuzzer.c (118 changed lines)
@@ -446,10 +446,10 @@ static int FUZ_test(U32 seed, U32 nbCycles, const U32 startCycle, const double c
 FUZ_DISPLAYTEST();
 decodedBuffer[srcSize] = canary;
 { int const dSize = LZ4_decompress_safe(compressedBuffer, decodedBuffer, cSize, srcSize);
-FUZ_CHECKTEST(dSize<0, "LZ4_decompress_safe() failed on data compressed by LZ4_compressHC_destSize");
-FUZ_CHECKTEST(dSize!=srcSize, "LZ4_decompress_safe() failed : did not fully decompressed data");
+FUZ_CHECKTEST(dSize<0, "LZ4_decompress_safe failed (%i) on data compressed by LZ4_compressHC_destSize", dSize);
+FUZ_CHECKTEST(dSize!=srcSize, "LZ4_decompress_safe failed : decompressed %i bytes, was supposed to decompress %i bytes", dSize, srcSize);
 }
-FUZ_CHECKTEST(decodedBuffer[srcSize] != canary, "LZ4_decompress_safe() overwrite dst buffer !");
+FUZ_CHECKTEST(decodedBuffer[srcSize] != canary, "LZ4_decompress_safe overwrite dst buffer !");
 { U32 const crcDec = XXH32(decodedBuffer, (size_t)srcSize, 0);
 FUZ_CHECKTEST(crcDec!=crcBase, "LZ4_decompress_safe() corrupted decoded data");
 } }
@@ -931,7 +931,8 @@ static int FUZ_test(U32 seed, U32 nbCycles, const U32 startCycle, const double c
 LZ4_resetStreamHC_fast (LZ4_streamHC, compressionLevel);
 LZ4_attach_HC_dictionary(LZ4_streamHC, LZ4dictHC);
 ret = LZ4_compress_HC_continue(LZ4_streamHC, block, compressedBuffer, blockSize, blockContinueCompressedSize);
-FUZ_CHECKTEST(ret!=blockContinueCompressedSize, "LZ4_compress_HC_continue using ExtDictCtx and fast reset size is different (%i != %i)", ret, blockContinueCompressedSize);
+FUZ_CHECKTEST(ret!=blockContinueCompressedSize, "LZ4_compress_HC_continue using ExtDictCtx and fast reset size is different (%i != %i)",
+ret, blockContinueCompressedSize);
 FUZ_CHECKTEST(ret<=0, "LZ4_compress_HC_continue using ExtDictCtx and fast reset should work : enough size available within output buffer");
 FUZ_CHECKTEST(LZ4_streamHC->internal_donotuse.dirty, "Context should be clean");

@@ -957,7 +958,8 @@ static int FUZ_test(U32 seed, U32 nbCycles, const U32 startCycle, const double c
 LZ4_loadDictHC(LZ4dictHC, dict, dictSize);
 LZ4_setCompressionLevel(LZ4dictHC, compressionLevel);
 blockContinueCompressedSize = LZ4_compress_HC_continue_destSize(LZ4dictHC, block, compressedBuffer, &consumedSize, availableSpace);
-DISPLAYLEVEL(5, " LZ4_compress_HC_continue_destSize : compressed %6i/%6i into %6i/%6i at cLevel=%i\n", consumedSize, blockSize, blockContinueCompressedSize, availableSpace, compressionLevel);
+DISPLAYLEVEL(5, " LZ4_compress_HC_continue_destSize : compressed %6i/%6i into %6i/%6i at cLevel=%i \n",
+consumedSize, blockSize, blockContinueCompressedSize, availableSpace, compressionLevel);
 FUZ_CHECKTEST(blockContinueCompressedSize==0, "LZ4_compress_HC_continue_destSize failed");
 FUZ_CHECKTEST(blockContinueCompressedSize > availableSpace, "LZ4_compress_HC_continue_destSize write overflow");
 FUZ_CHECKTEST(consumedSize > blockSize, "LZ4_compress_HC_continue_destSize read overflow");
@@ -965,7 +967,7 @@ static int FUZ_test(U32 seed, U32 nbCycles, const U32 startCycle, const double c
 FUZ_DISPLAYTEST();
 decodedBuffer[consumedSize] = 0;
 ret = LZ4_decompress_safe_usingDict(compressedBuffer, decodedBuffer, blockContinueCompressedSize, consumedSize, dict, dictSize);
-FUZ_CHECKTEST(ret!=consumedSize, "LZ4_decompress_safe_usingDict did not regenerate original data");
+FUZ_CHECKTEST(ret != consumedSize, "LZ4_decompress_safe_usingDict regenerated %i bytes (%i expected)", ret, consumedSize);
 FUZ_CHECKTEST(decodedBuffer[consumedSize], "LZ4_decompress_safe_usingDict overrun specified output buffer size")
 { U32 const crcSrc = XXH32(block, (size_t)consumedSize, 0);
 U32 const crcDst = XXH32(decodedBuffer, (size_t)consumedSize, 0);
@@ -1200,12 +1202,11 @@ static void FUZ_unitTests(int compressionLevel)

 /* LZ4 HC streaming tests */
 { LZ4_streamHC_t sHC; /* statically allocated */
-U64 crcOrig;
 int result;
 LZ4_initStreamHC(&sHC, sizeof(sHC));

 /* Allocation test */
-DISPLAYLEVEL(3, " Basic HC allocation : ");
+DISPLAYLEVEL(3, "Basic HC allocation : ");
 { LZ4_streamHC_t* const sp = LZ4_createStreamHC();
 FUZ_CHECKTEST(sp==NULL, "LZ4_createStreamHC() allocation failed");
 LZ4_freeStreamHC(sp);
@@ -1213,7 +1214,7 @@ static void FUZ_unitTests(int compressionLevel)
 DISPLAYLEVEL(3, " OK \n");

 /* simple HC compression test */
-DISPLAYLEVEL(3, " Simple HC round-trip : ");
+DISPLAYLEVEL(3, "Simple HC round-trip : ");
 { U64 const crc64 = XXH64(testInput, testCompressedSize, 0);
 LZ4_setCompressionLevel(&sHC, compressionLevel);
 result = LZ4_compress_HC_continue(&sHC, testInput, testCompressed, testCompressedSize, testCompressedSize-1);
@@ -1228,7 +1229,7 @@ static void FUZ_unitTests(int compressionLevel)
 DISPLAYLEVEL(3, " OK \n");

 /* long sequence test */
-DISPLAYLEVEL(3, " Long sequence HC test : ");
+DISPLAYLEVEL(3, "Long sequence HC_destSize test : ");
 { size_t const blockSize = 1 MB;
 size_t const targetSize = 4116; /* size carefully selected to trigger an overflow */
 void* const block = malloc(blockSize);
@@ -1246,8 +1247,13 @@ static void FUZ_unitTests(int compressionLevel)
 assert(targetSize < INT_MAX);
 result = LZ4_compress_HC_destSize(&sHC, (const char*)block, (char*)dstBlock, &srcSize, (int)targetSize, 3);
 DISPLAYLEVEL(4, "cSize=%i; readSize=%i; ", result, srcSize);
-FUZ_CHECKTEST(result!=4116, "LZ4_compress_HC_destSize() : compression must fill dstBuffer completely, but no more !");
-FUZ_CHECKTEST(((char*)dstBlock)[targetSize] != sentinel, "LZ4_compress_HC_destSize()")
+FUZ_CHECKTEST(result != 4116, "LZ4_compress_HC_destSize() : "
+"compression (%i->%i) must fill dstBuffer (%i) exactly",
+srcSize, result, (int)targetSize);
+FUZ_CHECKTEST(((char*)dstBlock)[targetSize] != sentinel,
+"LZ4_compress_HC_destSize() overwrites dst buffer");
+FUZ_CHECKTEST(srcSize < 1045000, "LZ4_compress_HC_destSize() doesn't compress enough"
+" (%i -> %i , expected > %i)", srcSize, result, 1045000);

 LZ4_resetStreamHC_fast(&sHC, 3); /* make sure the context is clean after the test */
 free(block);
@@ -1256,7 +1262,7 @@ static void FUZ_unitTests(int compressionLevel)
 DISPLAYLEVEL(3, " OK \n");

 /* simple dictionary HC compression test */
-DISPLAYLEVEL(3, " HC dictionary compression test : ");
+DISPLAYLEVEL(3, "HC dictionary compression test : ");
 { U64 const crc64 = XXH64(testInput + 64 KB, testCompressedSize, 0);
 LZ4_resetStreamHC_fast(&sHC, compressionLevel);
 LZ4_loadDictHC(&sHC, testInput, 64 KB);
@@ -1325,6 +1331,7 @@ static void FUZ_unitTests(int compressionLevel)
 XXH64_reset(&crcNewState, 0);

 while (segStart + segSize < testInputSize) {
+XXH64_hash_t crcOrig;
 XXH64_update(&crcOrigState, testInput + segStart, segSize);
 crcOrig = XXH64_digest(&crcOrigState);
 assert(segSize <= INT_MAX);
@@ -1371,6 +1378,7 @@ static void FUZ_unitTests(int compressionLevel)

 while (iNext + messageSize < testCompressedSize) {
 int compressedSize;
+XXH64_hash_t crcOrig;
 XXH64_update(&xxhOrig, testInput + iNext, messageSize);
 crcOrig = XXH64_digest(&xxhOrig);

@@ -1385,7 +1393,7 @@ static void FUZ_unitTests(int compressionLevel)
 FUZ_CHECKTEST(result!=(int)messageSize, "ringBuffer : LZ4_decompress_safe_continue() test failed");

 XXH64_update(&xxhNewSafe, testVerify + dNext, messageSize);
-{ U64 const crcNew = XXH64_digest(&xxhNewSafe);
+{ XXH64_hash_t const crcNew = XXH64_digest(&xxhNewSafe);
 FUZ_CHECKTEST(crcOrig!=crcNew, "LZ4_decompress_safe_continue() decompression corruption"); }

 assert(messageSize < INT_MAX);
@@ -1393,7 +1401,7 @@ static void FUZ_unitTests(int compressionLevel)
 FUZ_CHECKTEST(result!=compressedSize, "ringBuffer : LZ4_decompress_fast_continue() test failed");

 XXH64_update(&xxhNewFast, testVerify + dNext, messageSize);
-{ U64 const crcNew = XXH64_digest(&xxhNewFast);
+{ XXH64_hash_t const crcNew = XXH64_digest(&xxhNewFast);
 FUZ_CHECKTEST(crcOrig!=crcNew, "LZ4_decompress_fast_continue() decompression corruption"); }

 /* prepare next message */
@@ -1414,6 +1422,7 @@ static void FUZ_unitTests(int compressionLevel)
 */
 { XXH64_state_t xxhOrig;
 XXH64_state_t xxhNewSafe, xxhNewFast;
+XXH64_hash_t crcOrig;
 LZ4_streamDecode_t decodeStateSafe, decodeStateFast;
 const int maxMessageSizeLog = 12;
 const int maxMessageSize = 1 << maxMessageSizeLog;
@@ -1510,9 +1519,86 @@ static void FUZ_unitTests(int compressionLevel)
 iNext = (FUZ_rand(&randState) & 65535);
 if (dNext + maxMessageSize > dBufferSize) dNext = 0;
 }
-}
+} /* Ring buffer test : Non synchronized decoder */
 }

+DISPLAYLEVEL(3, "LZ4_compress_HC_destSize : ");
+/* encode congenerical sequence test for HC compressors */
+{ LZ4_streamHC_t sHC; /* statically allocated */
+int const src_buf_size = 3 MB;
+int const dst_buf_size = 6 KB;
+int const payload = 0;
+int const dst_step = 43;
+int const dst_min_len = 33 + (FUZ_rand(&randState) % dst_step);
+int const dst_max_len = 5000;
+int slen, dlen;
+char* sbuf1 = (char*)malloc(src_buf_size + 1);
+char* sbuf2 = (char*)malloc(src_buf_size + 1);
+char* dbuf1 = (char*)malloc(dst_buf_size + 1);
+char* dbuf2 = (char*)malloc(dst_buf_size + 1);
+
+assert(dst_buf_size > dst_max_len);
+if (!sbuf1 || !sbuf2 || !dbuf1 || !dbuf2) {
+EXIT_MSG("not enough memory for FUZ_unitTests (destSize)");
+}
+for (dlen = dst_min_len; dlen <= dst_max_len; dlen += dst_step) {
+int src_len = (dlen - 10)*255 + 24;
+if (src_len + 10 >= src_buf_size) break; /* END of check */
+for (slen = src_len - 3; slen <= src_len + 3; slen++) {
+int srcsz1, srcsz2;
+int dsz1, dsz2;
+int res1, res2;
+char const endchk = (char)0x88;
+DISPLAYLEVEL(5, "slen = %i, ", slen);
+
+srcsz1 = slen;
+memset(sbuf1, payload, slen);
+memset(dbuf1, 0, dlen);
+dbuf1[dlen] = endchk;
+dsz1 = LZ4_compress_destSize(sbuf1, dbuf1, &srcsz1, dlen);
+DISPLAYLEVEL(5, "LZ4_compress_destSize: %i bytes compressed into %i bytes, ", srcsz1, dsz1);
+DISPLAYLEVEL(5, "last token : 0x%0X, ", dbuf1[dsz1 - 6]);
+DISPLAYLEVEL(5, "last ML extra lenbyte : 0x%0X, \n", dbuf1[dsz1 - 7]);
+FUZ_CHECKTEST(dbuf1[dlen] != endchk, "LZ4_compress_destSize() overwrite dst buffer !");
+FUZ_CHECKTEST(dsz1 <= 0, "LZ4_compress_destSize() compression failed");
+FUZ_CHECKTEST(dsz1 > dlen, "LZ4_compress_destSize() result larger than dst buffer !");
+FUZ_CHECKTEST(srcsz1 > slen, "LZ4_compress_destSize() read more than src buffer !");
+
+res1 = LZ4_decompress_safe(dbuf1, sbuf1, dsz1, src_buf_size);
+FUZ_CHECKTEST(res1 != srcsz1, "LZ4_compress_destSize() decompression failed!");
+
+srcsz2 = slen;
+memset(sbuf2, payload, slen);
+memset(dbuf2, 0, dlen);
+dbuf2[dlen] = endchk;
+LZ4_resetStreamHC(&sHC, compressionLevel);
+dsz2 = LZ4_compress_HC_destSize(&sHC, sbuf2, dbuf2, &srcsz2, dlen, compressionLevel);
+DISPLAYLEVEL(5, "LZ4_compress_HC_destSize: %i bytes compressed into %i bytes, ", srcsz2, dsz2);
+DISPLAYLEVEL(5, "last token : 0x%0X, ", dbuf2[dsz2 - 6]);
+DISPLAYLEVEL(5, "last ML extra lenbyte : 0x%0X, \n", dbuf2[dsz2 - 7]);
+FUZ_CHECKTEST(dbuf2[dlen] != endchk, "LZ4_compress_HC_destSize() overwrite dst buffer !");
+FUZ_CHECKTEST(dsz2 <= 0, "LZ4_compress_HC_destSize() compression failed");
+FUZ_CHECKTEST(dsz2 > dlen, "LZ4_compress_HC_destSize() result larger than dst buffer !");
+FUZ_CHECKTEST(srcsz2 > slen, "LZ4_compress_HC_destSize() read more than src buffer !");
+FUZ_CHECKTEST(dsz2 != dsz1, "LZ4_compress_HC_destSize() return incorrect result !");
+FUZ_CHECKTEST(srcsz2 != srcsz1, "LZ4_compress_HC_destSize() return incorrect src buffer size "
+": srcsz2(%i) != srcsz1(%i)", srcsz2, srcsz1);
+FUZ_CHECKTEST(memcmp(dbuf2, dbuf1, (size_t)dsz2), "LZ4_compress_HC_destSize() return incorrect data into dst buffer !");
+
+res2 = LZ4_decompress_safe(dbuf2, sbuf1, dsz2, src_buf_size);
+FUZ_CHECKTEST(res2 != srcsz1, "LZ4_compress_HC_destSize() decompression failed!");
+
+FUZ_CHECKTEST(memcmp(sbuf1, sbuf2, (size_t)res2), "LZ4_compress_HC_destSize() decompression corruption!");
+}
+}
+free(sbuf1);
+free(sbuf2);
+free(dbuf1);
+free(dbuf2);
+}
+DISPLAYLEVEL(3, " OK \n");
+
+
 /* clean up */
 free(testInput);
 free(testCompressed);