fixed frametest error
The error can be reproduced with the following command:
./frametest -v -i100000000 -s1659 -t31096808

It is actually a bug in the streaming LZ4 API: when a new stream is started and the first chunk provided to complete it is smaller than MINMATCH, that chunk becomes a dictionary. No hash entry is generated or stored for it, yet the chunk remains reachable, because the default position 0 points to dictStart and position 0 is still within MAX_DISTANCE. The next attempt to read 32 bits from position 0 then fails.

The issue would have been mitigated by starting from index 64 KB, which would put position 0 too far away to be referenced. The proper fix is to reject such a "dictionary" as too small, which is what this patch does.
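A minimal sketch of the failing pattern at the API level (a hypothetical driver, not part of this patch; the deterministic reproduction is the frametest command above). It assumes the usual streaming calls LZ4_resetStream() and LZ4_compress_fast_continue(); buffer contents and sizes are illustrative only.

    /* Hypothetical illustration of the scenario described above. */
    #include "lz4.h"

    static void tiny_first_chunk(void)
    {
        LZ4_stream_t stream;
        char dst[128];
        const char tiny[3] = { 'a', 'b', 'c' };                /* first chunk < MINMATCH (4 bytes) */
        const char next[] = "abcabcabcabcabcabcabcabcabcabc";  /* follow-up block */

        LZ4_resetStream(&stream);

        /* The 3-byte chunk is kept as stream history ("dictionary"),
         * but it is too small for any hash entry to be generated. */
        (void)LZ4_compress_fast_continue(&stream, tiny, dst, (int)sizeof(tiny), (int)sizeof(dst), 1);

        /* Before this patch, an empty hash slot (default index 0) could still point
         * into that tiny history, and the match check would read 32 bits past its end.
         * With the patch, dictionaries smaller than 4 bytes are invalidated first. */
        (void)LZ4_compress_fast_continue(&stream, next, dst, (int)(sizeof(next)-1), (int)sizeof(dst), 1);
    }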
commit af12733467
parent d358e33faa
lib/lz4.c (13 changed lines)
@@ -767,6 +767,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic(
             } else if (dictDirective==usingExtDict) {
                 if (matchIndex < startIndex) {
                     DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+                    assert(startIndex - matchIndex >= MINMATCH);
                     match = dictBase + matchIndex;
                     lowLimit = dictionary;
                 } else {
@@ -989,6 +990,7 @@ _last_literals:
     if (outputLimited == fillOutput) {
         *inputConsumed = (int) (((const char*)ip)-source);
     }
+    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest));
     return (int)(((char*)op) - dest);
 }

@@ -1255,12 +1257,19 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
 {
     const tableType_t tableType = byU32;
     LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
-    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+    const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;

     if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
     LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

+    /* invalidate tiny dictionaries */
+    if (streamPtr->dictSize < 4) {
+        streamPtr->dictSize = 0;
+        streamPtr->dictionary = (const BYTE*)source;
+        dictEnd = (const BYTE*)source;
+    }
+
     /* Check overlapping input/dictionary space */
     {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
         if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
@@ -1402,6 +1411,8 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic(
     const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
     const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;

+    DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i)", srcSize);
+
     /* Special cases */
     if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;   /* targetOutputSize too high => just decode everything */
     if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1;   /* Empty output buffer */
lib/lz4frame.c

@@ -808,6 +808,7 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
     LZ4F_lastBlockStatus lastBlockCompressed = notDone;
     compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);

+    DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);
     if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
     if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
tests/frametest.c

@@ -740,8 +740,9 @@ static void locateBuffDiff(const void* buff1, const void* buff2, size_t size, un
     size_t p=0;
     const BYTE* b1=(const BYTE*)buff1;
     const BYTE* b2=(const BYTE*)buff2;
+    DISPLAY("locateBuffDiff: looking for error position \n");
     if (nonContiguous) {
-        DISPLAY("Non-contiguous output test (%i bytes)\n", (int)size);
+        DISPLAY("mode %u: non-contiguous output (%zu bytes), cannot search \n", nonContiguous, size);
         return;
     }
     while (p < size && b1[p]==b2[p]) p++;
@@ -838,6 +839,8 @@ int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double compressi
             size_t const iSize = MIN(sampleMax, (size_t)(iend-ip));
             size_t const oSize = LZ4F_compressBound(iSize, prefsPtr);
             cOptions.stableSrc = ((FUZ_rand(&randState) & 3) == 1);
+            DISPLAYLEVEL(6, "Sending %zi bytes to compress (stableSrc:%u) \n",
+                        iSize, cOptions.stableSrc);

             result = LZ4F_compressUpdate(cCtx, op, oSize, ip, iSize, &cOptions);
             CHECK(LZ4F_isError(result), "Compression failed (error %i : %s)", (int)result, LZ4F_getErrorName(result));
@@ -882,7 +885,8 @@ int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double compressi
                 dOptions.stableDst = FUZ_rand(&randState) & 1;
                 if (nonContiguousDst==2) dOptions.stableDst = 0;   /* overwrite mode */
                 result = LZ4F_decompress(dCtx, op, &oSize, ip, &iSize, &dOptions);
-                if (LZ4F_getErrorCode(result) == LZ4F_ERROR_contentChecksum_invalid) locateBuffDiff(srcStart, decodedBuffer, srcSize, nonContiguousDst);
+                if (LZ4F_getErrorCode(result) == LZ4F_ERROR_contentChecksum_invalid)
+                    locateBuffDiff(srcStart, decodedBuffer, srcSize, nonContiguousDst);
                 CHECK(LZ4F_isError(result), "Decompression failed (error %i:%s)", (int)result, LZ4F_getErrorName(result));
                 XXH64_update(&xxh64, op, (U32)oSize);
                 totalOut += oSize;