Fix merge conflicts

senhuang42 2020-08-26 15:43:38 -04:00
commit 3aec385a10
32 changed files with 1656 additions and 753 deletions

View File

@ -155,23 +155,25 @@ jobs:
sudo apt-get install gcc-mingw-w64
CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ CFLAGS="-Werror -O1" make zstd
armbuild:
runs-on: ubuntu-16.04 # doesn't work on latest
steps:
- uses: actions/checkout@v2
- name: ARM Build Test
run: |
make arminstall
make armbuild
# TODO: Broken test - fix and uncomment
# armbuild:
# runs-on: ubuntu-16.04 # doesn't work on latest
# steps:
# - uses: actions/checkout@v2
# - name: ARM Build Test
# run: |
# make arminstall
# make armbuild
armfuzz:
runs-on: ubuntu-16.04 # doesn't work on latest
steps:
- uses: actions/checkout@v2
- name: Qemu ARM emulation + Fuzz Test
run: |
make arminstall
make armfuzz
# TODO: Broken test - fix and uncomment
# armfuzz:
# runs-on: ubuntu-16.04 # doesn't work on latest
# steps:
# - uses: actions/checkout@v2
# - name: Qemu ARM emulation + Fuzz Test
# run: |
# make arminstall
# make armfuzz
bourne-shell:
runs-on: ubuntu-latest

View File

@ -38,8 +38,31 @@ const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
static U32 FSE_ctz(U32 val)
{
assert(val != 0);
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
return _BitScanForward(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
return __builtin_ctz(val);
# elif defined(__ICCARM__) /* IAR Intrinsic */
return __CTZ(val);
# else /* Software version */
U32 count = 0;
while ((val & 1) == 0) {
val >>= 1;
++count;
}
return count;
# endif
}
}
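FSE_ctz drives the new repeat-code fast path in FSE_readNCount_body below: the number of consecutive 0b11 two-bit repeat codes at the bottom of bitStream is the number of trailing one-bits, halved. A minimal illustrative sketch of that idea (count_repeat_codes is a hypothetical helper, not part of this patch):
static unsigned count_repeat_codes(U32 bitStream)
{
    /* Trailing ones of bitStream == trailing zeros of its complement.
     * Forcing the top bit of the complement to 1 keeps the FSE_ctz input
     * nonzero, avoiding undefined behavior.
     * Each repeat code is 2 bits wide, hence the shift by 1. */
    return FSE_ctz(~bitStream | 0x80000000) >> 1;
}
/* Example: a bitStream ending in ...10 11 11 holds 2 repeat codes:
 * the complement ends in ...01 00 00, so FSE_ctz returns 4, and 4 >> 1 == 2. */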
FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
@ -50,11 +73,12 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
U32 bitStream;
int bitCount;
unsigned charnum = 0;
unsigned const maxSV1 = *maxSVPtr + 1;
int previous0 = 0;
if (hbSize < 4) {
/* This function only works when hbSize >= 4 */
char buffer[4] = {0};
if (hbSize < 8) {
/* This function only works when hbSize >= 8 */
char buffer[8] = {0};
memcpy(buffer, headerBuffer, hbSize);
{ size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
buffer, sizeof(buffer));
@ -62,7 +86,7 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
if (countSize > hbSize) return ERROR(corruption_detected);
return countSize;
} }
assert(hbSize >= 4);
assert(hbSize >= 8);
/* init */
memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
@ -76,36 +100,58 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) & (charnum<=*maxSVPtr)) {
for (;;) {
if (previous0) {
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF) {
n0 += 24;
if (ip < iend-5) {
ip += 2;
bitStream = MEM_readLE32(ip) >> bitCount;
/* Count the number of repeats. Each time the
* 2-bit repeat code is 0b11 there is another
* repeat.
* Avoid UB by setting the high bit to 1.
*/
int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
while (repeats >= 12) {
charnum += 3 * 12;
if (LIKELY(ip <= iend-7)) {
ip += 3;
} else {
bitStream >>= 16;
bitCount += 16;
} }
while ((bitStream & 3) == 3) {
n0 += 3;
bitStream >>= 2;
bitCount += 2;
bitCount -= (int)(8 * (iend - 7 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
}
n0 += bitStream & 3;
charnum += 3 * repeats;
bitStream >>= 2 * repeats;
bitCount += 2 * repeats;
/* Add the final repeat which isn't 0b11. */
assert((bitStream & 3) < 3);
charnum += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
/* This is an error, but break and return an error
* at the end, because returning out of a loop makes
* it harder for the compiler to optimize.
*/
if (charnum >= maxSV1) break;
/* We don't need to set the normalized count to 0
* because we already memset the whole buffer to 0.
*/
if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
assert((bitCount >> 3) <= 3); /* For first condition to work */
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
} else {
bitStream >>= 2;
} }
{ int const max = (2*threshold-1) - remaining;
bitCount -= (int)(8 * (iend - 4 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
}
{
int const max = (2*threshold-1) - remaining;
int count;
if ((bitStream & (threshold-1)) < (U32)max) {
@ -118,24 +164,43 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
}
count--; /* extra accuracy */
remaining -= count < 0 ? -count : count; /* -1 means +1 */
/* When it matters (small blocks), this is a
* predictable branch, because we don't use -1.
*/
if (count >= 0) {
remaining -= count;
} else {
assert(count == -1);
remaining += count;
}
normalizedCounter[charnum++] = (short)count;
previous0 = !count;
while (remaining < threshold) {
nbBits--;
threshold >>= 1;
}
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
assert(threshold > 1);
if (remaining < threshold) {
/* This branch can be folded into the
* threshold update condition because we
* know that threshold > 1.
*/
if (remaining <= 1) break;
nbBits = BIT_highbit32(remaining) + 1;
threshold = 1 << (nbBits - 1);
}
if (charnum >= maxSV1) break;
if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
} } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
bitStream = MEM_readLE32(ip) >> bitCount;
} }
if (remaining != 1) return ERROR(corruption_detected);
/* Only possible when there are too many zeros. */
if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
if (bitCount > 32) return ERROR(corruption_detected);
*maxSVPtr = charnum-1;
@ -143,6 +208,43 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
return ip-istart;
}
/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_readNCount_body_default(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#if DYNAMIC_BMI2
TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif
size_t FSE_readNCount_bmi2(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif
(void)bmi2;
return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
size_t FSE_readNCount(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
@ -154,6 +256,16 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
}
FORCE_INLINE_TEMPLATE size_t HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize,
int bmi2)
{
U32 weightTotal;
const BYTE* ip = (const BYTE*) src;
@ -176,9 +288,8 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
huffWeight[n+1] = ip[n/2] & 15;
} } }
else { /* header compressed with FSE (normal case) */
FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */
oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2); /* max (hwSize-1) values decoded, as last one is implied */
if (FSE_isError(oSize)) return oSize;
}
@ -213,3 +324,37 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
*nbSymbolsPtr = (U32)(oSize+1);
return iSize+1;
}
/* Avoids the FORCE_INLINE of the _body() function. */
static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
{
return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
}
#if DYNAMIC_BMI2
static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
{
return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
}
#endif
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize,
int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
#endif
(void)bmi2;
return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}

View File

@ -137,10 +137,16 @@ FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize
/*! FSE_normalizeCount():
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
useLowProbCount is a boolean parameter which trades off compressed size for
faster header decoding. When it is set to 1, the compressed data will be slightly
smaller. When it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
is a good default, since header deserialization makes a big speed difference.
Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
@return : tableLog,
or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
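A hedged usage sketch of the new parameter, as shown below (the count[] histogram is assumed to come from an earlier counting pass, e.g. HIST_count, not shown; the >= 2048 cutoff mirrors the heuristic used elsewhere in this commit):
short norm[FSE_MAX_SYMBOL_VALUE + 1];
unsigned maxSymbolValue = 255;                     /* from the histogram pass */
size_t const srcSize = 1500;                       /* sum of count[] */
unsigned const useLowProbCount = srcSize >= 2048;  /* small input => favor fast headers */
size_t const tableLog = FSE_normalizeCount(norm, /* tableLog */ 0 /* => default */,
                                           count, srcSize,
                                           maxSymbolValue, useLowProbCount);
if (FSE_isError(tableLog)) { /* handle error */ }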
/*! FSE_NCountWriteBound():
Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
@ -228,6 +234,13 @@ FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
const void* rBuffer, size_t rBuffSize);
/*! FSE_readNCount_bmi2():
* Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
*/
FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
const void* rBuffer, size_t rBuffSize, int bmi2);
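The intended calling pattern, sketched with zstd's internal CPU helpers from lib/common/cpu.h (the same helpers this commit uses to set dctx->bmi2); the header buffer is assumed, and this is illustrative only:
#include "cpu.h"   /* ZSTD_cpuid(), ZSTD_cpuid_bmi2() */

short ncount[FSE_MAX_SYMBOL_VALUE + 1];
unsigned maxSV = FSE_MAX_SYMBOL_VALUE;
unsigned tableLog;
int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());    /* 1 iff the CPU supports BMI2 */
size_t const hSize = FSE_readNCount_bmi2(ncount, &maxSV, &tableLog,
                                         headerBuffer, hbSize, bmi2);
if (FSE_isError(hSize)) { /* handle error */ }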
/*! Constructor and Destructor of FSE_DTable.
Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
@ -309,9 +322,9 @@ unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsi
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
* FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
*/
#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
@ -326,14 +339,24 @@ size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
*/
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue))
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
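Workspace sizing for the new signature is static; a sketch mirroring how FSE_decompress() itself calls it in this commit (dst/cSrc buffers assumed):
U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
size_t const dSize = FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize,
                                         FSE_MAX_TABLELOG, wksp, sizeof(wksp));
if (FSE_isError(dSize)) { /* handle error */ }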
typedef enum {
FSE_repeat_none, /**< Cannot use the previous table */

View File

@ -68,17 +68,24 @@ void FSE_freeDTable (FSE_DTable* dt)
free(dt);
}
size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) {
U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)];
return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp));
}
static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
U16* symbolNext = (U16*)workSpace;
BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U32 highThreshold = tableSize-1;
/* Sanity Checks */
if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
@ -100,7 +107,53 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned
}
/* Spread symbols */
{ U32 const tableMask = tableSize-1;
if (highThreshold == tableSize - 1) {
size_t const tableMask = tableSize-1;
size_t const step = FSE_TABLESTEP(tableSize);
/* First lay down the symbols in order.
* We use a uint64_t to lay down 8 bytes at a time. This reduces branch
* misses since small blocks generally have small table logs, so nearly
* all symbols have counts <= 8. We ensure we have 8 bytes at the end of
* our buffer to handle the over-write.
*/
{
U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
for (s=0; s<maxSV1; ++s, sv += add) {
int i;
int const n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
pos += n;
}
}
/* Now we spread those positions across the table.
* The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
* We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
size_t s;
size_t const unroll = 2;
assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
for (s = 0; s < (size_t)tableSize; s += unroll) {
size_t u;
for (u = 0; u < unroll; ++u) {
size_t const uPosition = (position + (u * step)) & tableMask;
tableDecode[uPosition].symbol = spread[s + u];
}
position = (position + (unroll * step)) & tableMask;
}
assert(position == 0);
}
} else {
U32 const tableMask = tableSize-1;
U32 const step = FSE_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
@ -125,6 +178,11 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned
return 0;
}
size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
}
#ifndef FSE_COMMONDEFS_ONLY
@ -252,25 +310,71 @@ size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
}
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
}
FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
void* dst, size_t dstCapacity,
const void* cSrc, size_t cSrcSize,
unsigned maxLog, void* workSpace, size_t wkspSize,
int bmi2)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSE_MAX_SYMBOL_VALUE+1];
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
FSE_DTable* const dtable = (FSE_DTable*)workSpace;
/* normal FSE decoding mode */
size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
size_t const NCountLength = FSE_readNCount_bmi2(counting, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
if (FSE_isError(NCountLength)) return NCountLength;
if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
assert(NCountLength <= cSrcSize);
ip += NCountLength;
cSrcSize -= NCountLength;
CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
workSpace = dtable + FSE_DTABLE_SIZE_U32(tableLog);
wkspSize -= FSE_DTABLE_SIZE(tableLog);
return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */
CHECK_F( FSE_buildDTable_internal(dtable, counting, maxSymbolValue, tableLog, workSpace, wkspSize) );
{
const void* ptr = dtable;
const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1);
return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
}
}
/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
}
#if DYNAMIC_BMI2
TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
}
#endif
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
#endif
(void)bmi2;
return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
@ -278,8 +382,9 @@ typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
{
DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
/* Static analyzer seems unable to understand this table will be properly initialized later */
U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp));
}
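For reference, the two-stage spread introduced above can be sketched as a standalone routine. This is an illustrative simplification under stated assumptions (no low-probability -1 counts, no 8-byte batching or unrolling, tableLog <= 9, <string.h>/<assert.h> available); spread_symbols and TABLESTEP_ are hypothetical names, with TABLESTEP_ matching FSE_TABLESTEP:
#define TABLESTEP_(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)

static void spread_symbols(BYTE* table, unsigned tableLog,
                           const short* normalizedCounter, unsigned maxSV1)
{
    size_t const tableSize = (size_t)1 << tableLog;
    size_t const tableMask = tableSize - 1;
    size_t const step = TABLESTEP_(tableSize);
    BYTE spread[1 << 9];   /* large enough for tableLog <= 9 */
    size_t pos = 0;
    unsigned s;
    /* Stage 1: lay the symbols down contiguously, count[s] copies of each. */
    for (s = 0; s < maxSV1; ++s) {
        assert(normalizedCounter[s] >= 0);   /* no low-prob symbols in this case */
        memset(spread + pos, (int)s, (size_t)normalizedCounter[s]);
        pos += (size_t)normalizedCounter[s];
    }
    assert(pos == tableSize);
    /* Stage 2: scatter with a fixed stride. step is odd, so it is coprime
     * with the power-of-2 table size and the walk visits every cell once. */
    {   size_t position = 0;
        size_t i;
        for (i = 0; i < tableSize; ++i) {
            table[position] = spread[i];
            position = (position + step) & tableMask;
        }
        assert(position == 0);
    }
}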

View File

@ -111,6 +111,8 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
/* *** Dependencies *** */
#include "mem.h" /* U32 */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
/* *** Constants *** */
@ -226,6 +228,19 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize);
/*! HUF_readStats_wksp() :
* Same as HUF_readStats() but takes an external workspace which must be
* 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
* If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
*/
#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workspace, size_t wkspSize,
int bmi2);
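A usage sketch under the declared constraints, with the U32 workspace providing the required 4-byte alignment (src/srcSize assumed):
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
U32 rankStats[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
U32 nbSymbols, tableLog;
size_t const iSize = HUF_readStats_wksp(huffWeight, sizeof(huffWeight),
                                        rankStats, &nbSymbols, &tableLog,
                                        src, srcSize,
                                        wksp, sizeof(wksp), /* bmi2 */ 0);
if (HUF_isError(iSize)) { /* handle error */ }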
/** HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
@ -332,6 +347,9 @@ size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstS
#endif
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
#endif
#endif /* HUF_STATIC_LINKING_ONLY */

View File

@ -341,11 +341,10 @@ unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
/* Secondary normalization method.
To be used when primary method fails. */
static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
{
short const NOT_YET_ASSIGNED = -2;
U32 s;
@ -362,7 +361,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
continue;
}
if (count[s] <= lowThreshold) {
norm[s] = -1;
norm[s] = lowProbCount;
distributed++;
total -= count[s];
continue;
@ -431,10 +430,9 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
return 0;
}
size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
const unsigned* count, size_t total,
unsigned maxSymbolValue)
unsigned maxSymbolValue, unsigned useLowProbCount)
{
/* Sanity checks */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
@ -443,6 +441,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
{ static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
short const lowProbCount = useLowProbCount ? -1 : 1;
U64 const scale = 62 - tableLog;
U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */
U64 const vStep = 1ULL<<(scale-20);
@ -456,7 +455,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
if (count[s] == total) return 0; /* rle special case */
if (count[s] == 0) { normalizedCounter[s]=0; continue; }
if (count[s] <= lowThreshold) {
normalizedCounter[s] = -1;
normalizedCounter[s] = lowProbCount;
stillToDistribute--;
} else {
short proba = (short)((count[s]*step) >> scale);
@ -470,7 +469,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
} }
if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
/* corner case, need another normalization method */
size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
if (FSE_isError(errorCode)) return errorCode;
}
else normalizedCounter[largest] += (short)stillToDistribute;
@ -643,7 +642,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
/* init conditions */
if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
if (wkspSize < FSE_COMPRESS_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
if (srcSize <= 1) return 0; /* Not compressible */
if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
@ -656,7 +655,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
}
tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) );
/* Write table description header */
{ CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
@ -687,7 +686,7 @@ typedef struct {
size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
{
fseWkspMax_t scratchBuffer;
DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
}

View File

@ -85,7 +85,7 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
}
tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
/* Write table description header */
{ CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) );

View File

@ -50,6 +50,19 @@ static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
return maxSymbolValue;
}
/**
* Returns true if we should use ncount=-1, else ncount=1,
* for low probability symbols.
*/
static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
{
/* Heuristic: This should cover most blocks <= 16K and
* start to fade out after 16K to about 32K depending on
* compressibility.
*/
return nbSeq >= 2048;
}
/**
* Returns the cost in bytes of encoding the normalized count header.
* Returns an error if any of the helper functions return an error.
@ -60,7 +73,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
BYTE wksp[FSE_NCOUNTBOUND];
S16 norm[MaxSeq + 1];
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max), "");
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}
@ -253,7 +266,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
nbSeq_1--;
}
assert(nbSeq_1 > 1);
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max), "");
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize), "");

View File

@ -115,29 +115,51 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
/*-***************************/
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */
/**
* Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
* a time.
*/
static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
U64 D4;
if (MEM_isLittleEndian()) {
D4 = symbol + (nbBits << 8);
} else {
D4 = (symbol << 8) + nbBits;
}
D4 *= 0x0001000100010001ULL;
return D4;
}
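A quick worked check of the packing on a little-endian machine: symbol 0x41 with nbBits 5 gives the 16-bit lane 0x0541, and the multiply replicates it into all four lanes:
/* symbol + (nbBits << 8) = 0x41 + 0x0500 = 0x0541
 * 0x0541 * 0x0001000100010001ULL = 0x0541054105410541ULL,
 * i.e. four {byte = 0x41, nbBits = 5} HUF_DEltX1 entries, ready for a single
 * MEM_write64(). (Little-endian layout: .byte sits in the low byte of each lane.) */
U64 const D4 = HUF_DEltX1_set4(0x41, 5);
assert(D4 == 0x0541054105410541ULL);   /* holds on little-endian hosts */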
typedef struct {
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
} HUF_ReadDTableX1_Workspace;
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
{
return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
}
size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
{
U32 tableLog = 0;
U32 nbSymbols = 0;
size_t iSize;
void* const dtPtr = DTable + 1;
HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
U32* rankVal;
BYTE* huffWeight;
size_t spaceUsed32 = 0;
rankVal = (U32 *)workSpace + spaceUsed32;
spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
if (HUF_isError(iSize)) return iSize;
/* Table header */
@ -148,39 +170,111 @@ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize
memcpy(DTable, &dtd, sizeof(dtd));
}
/* Calculate starting value for each rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<tableLog+1; n++) {
/* Compute symbols and rankStart given rankVal:
*
* rankVal already contains the number of values of each weight.
*
* symbols contains the symbols ordered by weight. First are the rankVal[0]
* weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
* symbols[0] is filled (but unused) to avoid a branch.
*
* rankStart contains the offset where each rank belongs in the DTable.
* rankStart[0] is not filled because there are no entries in the table for
* weight 0.
*/
{
int n;
int nextRankStart = 0;
int const unroll = 4;
int const nLimit = (int)nbSymbols - unroll + 1;
for (n=0; n<(int)tableLog+1; n++) {
U32 const current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
} }
nextRankStart += wksp->rankVal[n];
wksp->rankStart[n] = current;
}
for (n=0; n < nLimit; n += unroll) {
int u;
for (u=0; u < unroll; ++u) {
size_t const w = wksp->huffWeight[n+u];
wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
}
}
for (; n < (int)nbSymbols; ++n) {
size_t const w = wksp->huffWeight[n];
wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
}
}
/* fill DTable */
{ U32 n;
size_t const nEnd = nbSymbols;
for (n=0; n<nEnd; n++) {
size_t const w = huffWeight[n];
size_t const length = (1 << w) >> 1;
size_t const uStart = rankVal[w];
size_t const uEnd = uStart + length;
size_t u;
HUF_DEltX1 D;
D.byte = (BYTE)n;
D.nbBits = (BYTE)(tableLog + 1 - w);
rankVal[w] = (U32)uEnd;
if (length < 4) {
/* Use length in the loop bound so the compiler knows it is short. */
for (u = 0; u < length; ++u)
dt[uStart + u] = D;
} else {
/* Unroll the loop 4 times, we know it is a power of 2. */
for (u = uStart; u < uEnd; u += 4) {
dt[u + 0] = D;
dt[u + 1] = D;
dt[u + 2] = D;
dt[u + 3] = D;
} } } }
/* fill DTable
* We fill all entries of each weight in order.
* That way length is a constant for each iteration of the outer loop.
* We can switch based on the length to a different inner loop which is
* optimized for that particular case.
*/
{
U32 w;
int symbol=wksp->rankVal[0];
int rankStart=0;
for (w=1; w<tableLog+1; ++w) {
int const symbolCount = wksp->rankVal[w];
int const length = (1 << w) >> 1;
int uStart = rankStart;
BYTE const nbBits = (BYTE)(tableLog + 1 - w);
int s;
int u;
switch (length) {
case 1:
for (s=0; s<symbolCount; ++s) {
HUF_DEltX1 D;
D.byte = wksp->symbols[symbol + s];
D.nbBits = nbBits;
dt[uStart] = D;
uStart += 1;
}
break;
case 2:
for (s=0; s<symbolCount; ++s) {
HUF_DEltX1 D;
D.byte = wksp->symbols[symbol + s];
D.nbBits = nbBits;
dt[uStart+0] = D;
dt[uStart+1] = D;
uStart += 2;
}
break;
case 4:
for (s=0; s<symbolCount; ++s) {
U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
MEM_write64(dt + uStart, D4);
uStart += 4;
}
break;
case 8:
for (s=0; s<symbolCount; ++s) {
U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
MEM_write64(dt + uStart, D4);
MEM_write64(dt + uStart + 4, D4);
uStart += 8;
}
break;
default:
for (s=0; s<symbolCount; ++s) {
U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
for (u=0; u < length; u += 16) {
MEM_write64(dt + uStart + u + 0, D4);
MEM_write64(dt + uStart + u + 4, D4);
MEM_write64(dt + uStart + u + 8, D4);
MEM_write64(dt + uStart + u + 12, D4);
}
assert(u == length);
uStart += length;
}
break;
}
symbol += symbolCount;
rankStart += symbolCount * length;
}
}
return iSize;
}
@ -419,8 +513,7 @@ static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,
workSpace, wkspSize);
size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@ -1199,7 +1292,7 @@ size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstS
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;

View File

@ -114,6 +114,8 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
dctx->oversizedDuration = 0;
dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
dctx->outBufferMode = ZSTD_obm_buffered;
dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
dctx->validateChecksum = 1;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentEndForFuzzing = NULL;
#endif
@ -446,7 +448,8 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
dictionary_wrong, "");
#endif
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
if (dctx->validateChecksum) XXH64_reset(&dctx->xxhState, 0);
return 0;
}
@ -579,11 +582,11 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_copyRawBlock");
RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (srcSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
memcpy(dst, src, srcSize);
return srcSize;
}
@ -592,11 +595,11 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
BYTE b,
size_t regenSize)
{
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (regenSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
memset(dst, b, regenSize);
return regenSize;
}
@ -661,7 +664,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
if (ZSTD_isError(decodedSize)) return decodedSize;
if (dctx->fParams.checksumFlag)
if (dctx->validateChecksum)
XXH64_update(&dctx->xxhState, op, decodedSize);
if (decodedSize != 0)
op += decodedSize;
@ -676,11 +679,13 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
corruption_detected, "");
}
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
U32 checkRead;
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
checkRead = MEM_readLE32(ip);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
if (!dctx->forceIgnoreChecksum) {
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
U32 checkRead;
checkRead = MEM_readLE32(ip);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
}
ip += 4;
remainingSrcSize -= 4;
}
@ -977,7 +982,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
dctx->decodedSize += rSize;
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
if (dctx->validateChecksum) XXH64_update(&dctx->xxhState, dst, rSize);
dctx->previousDstEnd = (char*)dst + rSize;
/* Stay on the same stage until we are finished streaming the block. */
@ -1007,10 +1012,13 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_checkChecksum:
assert(srcSize == 4); /* guaranteed by dctx->expected */
{ U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
{
if (dctx->validateChecksum) {
U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
}
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
@ -1091,7 +1099,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
ZSTD_buildFSETable( entropy->OFTable,
offcodeNCount, offcodeMaxValue,
OF_base, OF_bits,
offcodeLog);
offcodeLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */0);
dictPtr += offcodeHeaderSize;
}
@ -1104,7 +1114,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
ZSTD_buildFSETable( entropy->MLTable,
matchlengthNCount, matchlengthMaxValue,
ML_base, ML_bits,
matchlengthLog);
matchlengthLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */ 0);
dictPtr += matchlengthHeaderSize;
}
@ -1117,7 +1129,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
ZSTD_buildFSETable( entropy->LLTable,
litlengthNCount, litlengthMaxValue,
LL_base, LL_bits,
litlengthLog);
litlengthLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */ 0);
dictPtr += litlengthHeaderSize;
}
@ -1414,6 +1428,10 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
bounds.lowerBound = (int)ZSTD_obm_buffered;
bounds.upperBound = (int)ZSTD_obm_stable;
return bounds;
case ZSTD_d_forceIgnoreChecksum:
bounds.lowerBound = (int)ZSTD_d_validateChecksum;
bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
return bounds;
default:;
}
bounds.error = ERROR(parameter_unsupported);
@ -1453,6 +1471,10 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
dctx->outBufferMode = (ZSTD_outBufferMode_e)value;
return 0;
case ZSTD_d_forceIgnoreChecksum:
CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");

View File

@ -364,23 +364,26 @@ static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddB
* generate FSE decoding table for one symbol (ll, ml or off)
* cannot fail if input is valid =>
* all inputs are presumed validated at this stage */
void
ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
FORCE_INLINE_TEMPLATE
void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U32* nbAdditionalBits,
unsigned tableLog)
unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_seqSymbol* const tableDecode = dt+1;
U16 symbolNext[MaxSeq+1];
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U32 highThreshold = tableSize-1;
U16* symbolNext = (U16*)wksp;
BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
U32 highThreshold = tableSize - 1;
/* Sanity Checks */
assert(maxSymbolValue <= MaxSeq);
assert(tableLog <= MaxFSELog);
assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
(void)wkspSize;
/* Init, lay down lowprob symbols */
{ ZSTD_seqSymbol_header DTableH;
DTableH.tableLog = tableLog;
@ -400,12 +403,65 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
}
/* Spread symbols */
{ U32 const tableMask = tableSize-1;
assert(tableSize <= 512);
/* Specialized symbol spreading for the case when there are
* no low probability (-1 count) symbols. When compressing
* small blocks we avoid low probability symbols to hit this
* case, since header decoding speed matters more.
*/
if (highThreshold == tableSize - 1) {
size_t const tableMask = tableSize-1;
size_t const step = FSE_TABLESTEP(tableSize);
/* First lay down the symbols in order.
* We use a uint64_t to lay down 8 bytes at a time. This reduces branch
* misses since small blocks generally have small table logs, so nearly
* all symbols have counts <= 8. We ensure we have 8 bytes at the end of
* our buffer to handle the over-write.
*/
{
U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
for (s=0; s<maxSV1; ++s, sv += add) {
int i;
int const n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
pos += n;
}
}
/* Now we spread those positions across the table.
* The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
* We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
size_t s;
size_t const unroll = 2;
assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
for (s = 0; s < (size_t)tableSize; s += unroll) {
size_t u;
for (u = 0; u < unroll; ++u) {
size_t const uPosition = (position + (u * step)) & tableMask;
tableDecode[uPosition].baseValue = spread[s + u];
}
position = (position + (unroll * step)) & tableMask;
}
assert(position == 0);
}
} else {
U32 const tableMask = tableSize-1;
U32 const step = FSE_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
int i;
for (i=0; i<normalizedCounter[s]; i++) {
int const n = normalizedCounter[s];
for (i=0; i<n; i++) {
tableDecode[position].baseValue = s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
@ -414,7 +470,8 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
}
/* Build Decoding table */
{ U32 u;
{
U32 u;
for (u=0; u<tableSize; u++) {
U32 const symbol = tableDecode[u].baseValue;
U32 const nextState = symbolNext[symbol]++;
@ -423,7 +480,46 @@ ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
assert(nbAdditionalBits[symbol] < 255);
tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
tableDecode[u].baseValue = baseValue[symbol];
} }
}
}
}
/* Avoids the FORCE_INLINE of the _body() function. */
static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U32* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
#if DYNAMIC_BMI2
TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U32* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
#endif
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U32* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
return;
}
#endif
(void)bmi2;
ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
@ -435,7 +531,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
const void* src, size_t srcSize,
const U32* baseValue, const U32* nbAdditionalBits,
const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
int ddictIsCold, int nbSeq)
int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
int bmi2)
{
switch(type)
{
@ -467,7 +564,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
*DTablePtr = DTableSpace;
return headerSize;
}
@ -521,7 +618,9 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip, iend-ip,
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
dctx->bmi2);
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += llhSize;
}
@ -531,7 +630,9 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip, iend-ip,
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
dctx->bmi2);
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += ofhSize;
}
@ -541,7 +642,9 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ip, iend-ip,
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
dctx->bmi2);
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += mlhSize;
}
@ -1085,14 +1188,14 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
#endif
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
BIT_reloadDStream(&(seqState.DStream));
op += oneSeqSize;
/* gcc and clang both don't like early returns in this loop.
* gcc doesn't like early breaks either.
* Instead save an error and report it at the end.
* When there is an error, don't increment op, so we don't
* overwrite.
* Instead break and check for an error at the end of the loop.
*/
if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize;
else op += oneSeqSize;
if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
error = oneSeqSize;
break;
}
if (UNLIKELY(!--nbSeq)) break;
}

View File

@ -48,12 +48,15 @@ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
* this function must be called with valid parameters only
* (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
* in which case it cannot fail.
* The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
* defined in zstd_decompress_internal.h.
* Internal use only.
*/
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U32* nbAdditionalBits,
unsigned tableLog);
unsigned tableLog, void* wksp, size_t wkspSize,
int bmi2);
#endif /* ZSTD_DEC_BLOCK_H */

View File

@ -73,12 +73,16 @@ static const U32 ML_base[MaxML+1] = {
#define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
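As a worked sizing example, assuming zstd's usual constants MaxSeq == 52 and MaxFSELog == 9 (stated here as assumptions, not taken from this diff):
/* sizeof(S16) * (MaxSeq + 1) = 2 * 53 = 106 bytes   (symbolNext[])
 * (1u << MaxFSELog)          = 512 bytes            (spread[])
 * sizeof(U64)                = 8 bytes              (MEM_write64 overwrite slack)
 * total = 626 bytes  =>  ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 = 157 U32s */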
typedef struct {
ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
U32 rep[ZSTD_REP_NUM];
U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
} ZSTD_entropyDTables_t;
typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
@ -122,6 +126,8 @@ struct ZSTD_DCtx_s
XXH64_state_t xxhState;
size_t headerSize;
ZSTD_format_e format;
ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
U32 validateChecksum; /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
const BYTE* litPtr;
ZSTD_customMem customMem;
size_t litSize;

View File

@ -786,7 +786,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
/* note : the result of this phase should be used to better appreciate the impact on statistics */
total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, /* useLowProbCount */ 1);
if (FSE_isError(errorCode)) {
eSize = errorCode;
DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
@ -795,7 +795,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
Offlog = (U32)errorCode;
total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML, /* useLowProbCount */ 1);
if (FSE_isError(errorCode)) {
eSize = errorCode;
DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
@ -804,7 +804,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
mlLog = (U32)errorCode;
total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL, /* useLowProbCount */ 1);
if (FSE_isError(errorCode)) {
eSize = errorCode;
DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");

View File

@ -528,11 +528,13 @@ typedef enum {
* At the time of this writing, they include :
* ZSTD_d_format
* ZSTD_d_stableOutBuffer
* ZSTD_d_forceIgnoreChecksum
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly
*/
ZSTD_d_experimentalParam1=1000,
ZSTD_d_experimentalParam2=1001
ZSTD_d_experimentalParam2=1001,
ZSTD_d_experimentalParam3=1002
} ZSTD_dParameter;
@ -1160,6 +1162,12 @@ typedef enum {
* Decoder cannot recognise automatically this format, requiring this instruction. */
} ZSTD_format_e;
typedef enum {
/* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
ZSTD_d_validateChecksum = 0,
ZSTD_d_ignoreChecksum = 1
} ZSTD_forceIgnoreChecksum_e;
typedef enum {
/* Note: this enum and the behavior it controls are effectively internal
* implementation details of the compressor. They are expected to continue
@ -1690,6 +1698,17 @@ ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowS
*/
#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
/* ZSTD_d_forceIgnoreChecksum
* Experimental parameter.
Default is 0 == disabled. Set to 1 to enable.
*
* Tells the decompressor to skip checksum validation during decompression, regardless
* of whether checksumming was specified during compression. This offers some
* slight performance benefits, and may be useful for debugging.
* Param has values of type ZSTD_forceIgnoreChecksum_e
*/
#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
/*! ZSTD_DCtx_setFormat() :
* Instruct the decoder context about what kind of data to decode next.
* This instruction is mandatory to decode data without a fully-formed header,

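A usage sketch for the ZSTD_d_forceIgnoreChecksum parameter documented above (not code from this diff; a one-shot ZSTD_DCtx round trip is assumed):

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_d_forceIgnoreChecksum is experimental */
#include "zstd.h"

/* Hedged sketch: decompress a frame while skipping checksum validation. */
static size_t decompressIgnoringChecksum(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    size_t r;
    if (dctx == NULL) return (size_t)-1;   /* report failure; caller checks ZSTD_isError() */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_forceIgnoreChecksum, ZSTD_d_ignoreChecksum);
    r = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeDCtx(dctx);
    return r;
}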
View File

@ -319,6 +319,8 @@ struct FIO_prefs_s {
int excludeCompressedFiles;
int patchFromMode;
int contentSize;
int currFileIdx;
int nbFiles;
};
@ -360,6 +362,8 @@ FIO_prefs_t* FIO_createPreferences(void)
ret->testMode = 0;
ret->literalCompressionMode = ZSTD_lcm_auto;
ret->excludeCompressedFiles = 0;
ret->nbFiles = 1;
ret->currFileIdx = 0;
return ret;
}
@ -495,6 +499,16 @@ void FIO_setContentSize(FIO_prefs_t* const prefs, int value)
prefs->contentSize = value != 0;
}
void FIO_setNbFiles(FIO_prefs_t* const prefs, int value)
{
prefs->nbFiles = value;
}
void FIO_setCurrFileIdx(FIO_prefs_t* const prefs, int value)
{
prefs->currFileIdx = value;
}
/*-*************************************
* Functions
***************************************/
@ -1255,10 +1269,15 @@ FIO_compressZstdFrame(FIO_prefs_t* const prefs,
(unsigned)(zfp.produced >> 20),
cShare );
} else { /* summarized notifications if == 2; */
DISPLAYLEVEL(2, "\rRead : %u ", (unsigned)(zfp.consumed >> 20));
if (prefs->nbFiles > 1) {
DISPLAYLEVEL(2, "\rCompressing %u/%u files. Current source: %s ", prefs->currFileIdx+1, prefs->nbFiles, srcFileName);
} else {
DISPLAYLEVEL(2, "\r");
}
DISPLAYLEVEL(2, "Read : %u ", (unsigned)(zfp.consumed >> 20));
if (fileSize != UTIL_FILESIZE_UNKNOWN)
DISPLAYLEVEL(2, "/ %u ", (unsigned)(fileSize >> 20));
DISPLAYLEVEL(2, "MB ==> %2.f%% ", cShare);
DISPLAYLEVEL(2, "MB ==> %2.f%%", cShare);
DELAY_NEXT_UPDATE();
}
@ -1421,18 +1440,21 @@ FIO_compressFilename_internal(FIO_prefs_t* const prefs,
}
/* Status */
DISPLAYLEVEL(2, "\r%79s\r", "");
if (readsize == 0) {
DISPLAYLEVEL(2,"%-20s : (%6llu => %6llu bytes, %s) \n",
srcFileName,
(unsigned long long)readsize, (unsigned long long) compressedfilesize,
dstFileName);
} else {
DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6llu => %6llu bytes, %s) \n",
srcFileName,
(double)compressedfilesize / readsize * 100,
(unsigned long long)readsize, (unsigned long long) compressedfilesize,
dstFileName);
if (g_display_prefs.displayLevel > 2 || (g_display_prefs.displayLevel == 2 && prefs->nbFiles <= 1)) {
if (readsize == 0) {
DISPLAYLEVEL(2,"%-20s : (%6llu => %6llu bytes, %s) \n",
srcFileName,
(unsigned long long)readsize, (unsigned long long) compressedfilesize,
dstFileName);
} else {
DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6llu => %6llu bytes, %s) \n",
srcFileName,
(double)compressedfilesize / readsize * 100,
(unsigned long long)readsize, (unsigned long long) compressedfilesize,
dstFileName);
}
}
/* Elapsed Time and CPU Load */
@ -1656,7 +1678,7 @@ static unsigned long long FIO_getLargestFileSize(const char** inFileNames, unsig
* or into a destination folder (specified with -O)
*/
int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
const char** inFileNamesTable, unsigned nbFiles,
const char** inFileNamesTable,
const char* outMirroredRootDirName,
const char* outDirName,
const char* outFileName, const char* suffix,
@ -1665,7 +1687,7 @@ int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
{
int error = 0;
cRess_t ress = FIO_createCResources(prefs, dictFileName,
FIO_getLargestFileSize(inFileNamesTable, nbFiles),
FIO_getLargestFileSize(inFileNamesTable, prefs->nbFiles),
compressionLevel, comprParams);
/* init */
@ -1694,21 +1716,20 @@ int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
if (ress.dstFile == NULL) { /* could not open outFileName */
error = 1;
} else {
unsigned u;
for (u=0; u<nbFiles; u++)
error |= FIO_compressFilename_srcFile(prefs, ress, outFileName, inFileNamesTable[u], compressionLevel);
for (; prefs->currFileIdx < prefs->nbFiles; ++prefs->currFileIdx) {
error |= FIO_compressFilename_srcFile(prefs, ress, outFileName, inFileNamesTable[prefs->currFileIdx], compressionLevel);
}
if (fclose(ress.dstFile))
EXM_THROW(29, "Write error (%s) : cannot properly close %s",
strerror(errno), outFileName);
ress.dstFile = NULL;
}
} else {
unsigned int u=0;
if (outMirroredRootDirName)
UTIL_mirrorSourceFilesDirectories(inFileNamesTable, nbFiles, outMirroredRootDirName);
UTIL_mirrorSourceFilesDirectories(inFileNamesTable, prefs->nbFiles, outMirroredRootDirName);
for (u=0; u<nbFiles; u++) {
const char* const srcFileName = inFileNamesTable[u];
for (; prefs->currFileIdx < prefs->nbFiles; ++prefs->currFileIdx) {
const char* const srcFileName = inFileNamesTable[prefs->currFileIdx];
const char* dstFileName = NULL;
if (outMirroredRootDirName) {
char* validMirroredDirName = UTIL_createMirroredDestDirName(srcFileName, outMirroredRootDirName);
@ -1723,12 +1744,11 @@ int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
} else {
dstFileName = FIO_determineCompressedName(srcFileName, outDirName, suffix); /* cannot fail */
}
error |= FIO_compressFilename_srcFile(prefs, ress, dstFileName, srcFileName, compressionLevel);
}
if (outDirName)
FIO_checkFilenameCollisions(inFileNamesTable ,nbFiles);
FIO_checkFilenameCollisions(inFileNamesTable , prefs->nbFiles);
}
FIO_freeCResources(ress);
@ -1767,6 +1787,8 @@ static dRess_t FIO_createDResources(FIO_prefs_t* const prefs, const char* dictFi
if (ress.dctx==NULL)
EXM_THROW(60, "Error: %s : can't create ZSTD_DStream", strerror(errno));
CHECK( ZSTD_DCtx_setMaxWindowSize(ress.dctx, prefs->memLimit) );
CHECK( ZSTD_DCtx_setParameter(ress.dctx, ZSTD_d_forceIgnoreChecksum, !prefs->checksumFlag));
ress.srcBufferSize = ZSTD_DStreamInSize();
ress.srcBuffer = malloc(ress.srcBufferSize);
ress.dstBufferSize = ZSTD_DStreamOutSize();
@ -1998,8 +2020,13 @@ FIO_decompressZstdFrame(dRess_t* ress, FILE* finput,
/* Write block */
storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, prefs, storedSkips);
frameSize += outBuff.pos;
DISPLAYUPDATE(2, "\r%-20.20s : %u MB... ",
srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) );
if (prefs->nbFiles > 1) {
DISPLAYUPDATE(2, "\rDecompressing %u/%u files. Current source: %-20.20s : %u MB... ",
prefs->currFileIdx+1, prefs->nbFiles, srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) );
} else {
DISPLAYUPDATE(2, "\r%-20.20s : %u MB... ",
srcFileName, (unsigned)((alreadyDecoded+frameSize)>>20) );
}
if (inBuff.pos > 0) {
memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos);
@ -2329,7 +2356,10 @@ static int FIO_decompressFrames(dRess_t ress, FILE* srcFile,
/* Final Status */
DISPLAYLEVEL(2, "\r%79s\r", "");
DISPLAYLEVEL(2, "%-20s: %llu bytes \n", srcFileName, filesize);
/* No status message in pipe mode (stdin - stdout) or multi-file mode */
if (g_display_prefs.displayLevel > 2 || (g_display_prefs.displayLevel == 2 && prefs->nbFiles <= 1)) {
DISPLAYLEVEL(2, "%-20s: %llu bytes \n", srcFileName, filesize);
}
return 0;
}
@ -2567,7 +2597,7 @@ FIO_determineDstName(const char* srcFileName, const char* outDirName)
int
FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs,
const char** srcNamesTable, unsigned nbFiles,
const char** srcNamesTable,
const char* outMirroredRootDirName,
const char* outDirName, const char* outFileName,
const char* dictFileName)
@ -2576,23 +2606,22 @@ FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs,
dRess_t ress = FIO_createDResources(prefs, dictFileName);
if (outFileName) {
unsigned u;
if (!prefs->testMode) {
ress.dstFile = FIO_openDstFile(prefs, NULL, outFileName);
if (ress.dstFile == 0) EXM_THROW(19, "cannot open %s", outFileName);
}
for (u=0; u<nbFiles; u++)
error |= FIO_decompressSrcFile(prefs, ress, outFileName, srcNamesTable[u]);
for (; prefs->currFileIdx < prefs->nbFiles; prefs->currFileIdx++) {
error |= FIO_decompressSrcFile(prefs, ress, outFileName, srcNamesTable[prefs->currFileIdx]);
}
if ((!prefs->testMode) && (fclose(ress.dstFile)))
EXM_THROW(72, "Write error : %s : cannot properly close output file",
strerror(errno));
} else {
unsigned int u = 0;
if (outMirroredRootDirName)
UTIL_mirrorSourceFilesDirectories(srcNamesTable, nbFiles, outMirroredRootDirName);
UTIL_mirrorSourceFilesDirectories(srcNamesTable, prefs->nbFiles, outMirroredRootDirName);
for (u=0; u<nbFiles; u++) { /* create dstFileName */
const char* const srcFileName = srcNamesTable[u];
for (; prefs->currFileIdx < prefs->nbFiles; prefs->currFileIdx++) { /* create dstFileName */
const char* const srcFileName = srcNamesTable[prefs->currFileIdx];
const char* dstFileName = NULL;
if (outMirroredRootDirName) {
char* validMirroredDirName = UTIL_createMirroredDestDirName(srcFileName, outMirroredRootDirName);
@ -2609,7 +2638,7 @@ FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs,
error |= FIO_decompressSrcFile(prefs, ress, dstFileName, srcFileName);
}
if (outDirName)
FIO_checkFilenameCollisions(srcNamesTable ,nbFiles);
FIO_checkFilenameCollisions(srcNamesTable , prefs->nbFiles);
}
FIO_freeDResources(ress);

View File

@ -96,6 +96,8 @@ void FIO_setNotificationLevel(int level);
void FIO_setExcludeCompressedFile(FIO_prefs_t* const prefs, int excludeCompressedFiles);
void FIO_setPatchFromMode(FIO_prefs_t* const prefs, int value);
void FIO_setContentSize(FIO_prefs_t* const prefs, int value);
void FIO_setNbFiles(FIO_prefs_t* const prefs, int value);
void FIO_setCurrFileIdx(FIO_prefs_t* const prefs, int value);
/*-*************************************
* Single File functions
@ -121,7 +123,7 @@ int FIO_listMultipleFiles(unsigned numFiles, const char** filenameTable, int dis
/** FIO_compressMultipleFilenames() :
* @return : nb of missing files */
int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
const char** inFileNamesTable, unsigned nbFiles,
const char** inFileNamesTable,
const char* outMirroredDirName,
const char* outDirName,
const char* outFileName, const char* suffix,
@ -131,7 +133,7 @@ int FIO_compressMultipleFilenames(FIO_prefs_t* const prefs,
/** FIO_decompressMultipleFilenames() :
* @return : nb of missing or skipped files */
int FIO_decompressMultipleFilenames(FIO_prefs_t* const prefs,
const char** srcNamesTable, unsigned nbFiles,
const char** srcNamesTable,
const char* outMirroredDirName,
const char* outDirName,
const char* outFileName,

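For orientation, a sketch of the new calling convention (not from this diff): the file count now travels through prefs rather than an explicit nbFiles argument. prefs is assumed to come from FIO_createPreferences(), which initializes currFileIdx to 0; the remaining identifiers are the zstdcli.c variables visible further below.

FIO_setNbFiles(prefs, (int)filenames->tableSize);
operationResult = FIO_compressMultipleFilenames(prefs, filenames->fileNames,
                      outMirroredDirName, outDirName, outFileName,
                      suffix, dictFileName, cLevel, compressionParams);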
View File

@ -154,6 +154,19 @@ static void usage_advanced(const char* programName)
DISPLAYOUT( "--output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure \n");
#endif
#ifndef ZSTD_NOCOMPRESS
DISPLAYOUT( "--[no-]check : during compression, add XXH64 integrity checksum to frame (default: enabled)");
#ifndef ZSTD_NODECOMPRESS
DISPLAYOUT( ". If specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate).");
#endif
#else
#ifdef ZSTD_NOCOMPRESS
DISPLAYOUT( "--[no-]check : during decompression, ignore/validate checksums in compressed frame (default: validate).");
#endif
#endif /* ZSTD_NOCOMPRESS */
DISPLAYOUT( "\n");
DISPLAYOUT( "-- : All arguments after \"--\" are treated as files \n");
#ifndef ZSTD_NOCOMPRESS
@ -174,7 +187,6 @@ static void usage_advanced(const char* programName)
DISPLAYOUT( "--size-hint=# optimize compression parameters for streaming input of approximately this size \n");
DISPLAYOUT( "--target-compressed-block-size=# : generate compressed block of approximately targeted size \n");
DISPLAYOUT( "--no-dictID : don't write dictID into header (dictionary compression only) \n");
DISPLAYOUT( "--[no-]check : add XXH64 integrity checksum to frame (default: enabled) \n");
DISPLAYOUT( "--[no-]compress-literals : force (un)compressed literals \n");
DISPLAYOUT( "--format=zstd : compress files to the .zst format (default) \n");
@ -1243,12 +1255,12 @@ int main(int const argCount, const char* argv[])
DISPLAY("error : can't use --patch-from=# on multiple files \n");
CLEAN_RETURN(1);
}
/* No status message in pipe mode (stdin - stdout) or multi-files mode */
/* No status message in pipe mode (stdin - stdout) */
if (!strcmp(filenames->fileNames[0], stdinmark) && outFileName && !strcmp(outFileName,stdoutmark) && (g_displayLevel==2)) g_displayLevel=1;
if ((filenames->tableSize > 1) & (g_displayLevel==2)) g_displayLevel=1;
/* IO Stream/File */
FIO_setNbFiles(prefs, (int)filenames->tableSize);
FIO_setNotificationLevel(g_displayLevel);
FIO_setPatchFromMode(prefs, patchFromDictFileName != NULL);
if (memLimit == 0) {
@ -1307,9 +1319,9 @@ int main(int const argCount, const char* argv[])
}
if ((filenames->tableSize==1) && outFileName)
operationResult = FIO_compressFilename(prefs, outFileName, filenames->fileNames[0], dictFileName, cLevel, compressionParams);
operationResult = FIO_compressFilename(prefs, outFileName, filenames->fileNames[0], dictFileName, cLevel, compressionParams);
else
operationResult = FIO_compressMultipleFilenames(prefs, filenames->fileNames, (unsigned)filenames->tableSize, outMirroredDirName, outDirName, outFileName, suffix, dictFileName, cLevel, compressionParams);
operationResult = FIO_compressMultipleFilenames(prefs, filenames->fileNames, outMirroredDirName, outDirName, outFileName, suffix, dictFileName, cLevel, compressionParams);
#else
(void)contentSize; (void)suffix; (void)adapt; (void)rsyncable; (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; (void)targetCBlockSize; (void)streamSrcSize; (void)srcSizeHint; (void)ZSTD_strategyMap; /* not used when ZSTD_NOCOMPRESS set */
DISPLAY("Compression not supported \n");
@ -1319,7 +1331,7 @@ int main(int const argCount, const char* argv[])
if (filenames->tableSize == 1 && outFileName) {
operationResult = FIO_decompressFilename(prefs, outFileName, filenames->fileNames[0], dictFileName);
} else {
operationResult = FIO_decompressMultipleFilenames(prefs, filenames->fileNames, (unsigned)filenames->tableSize, outMirroredDirName, outDirName, outFileName, dictFileName);
operationResult = FIO_decompressMultipleFilenames(prefs, filenames->fileNames, outMirroredDirName, outDirName, outFileName, dictFileName);
}
#else
DISPLAY("Decompression not supported \n");

View File

@ -859,7 +859,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; }
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max, nbSeq >= 2048);
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return ERROR(GENERIC);
op += NCountSize; }
@ -887,7 +887,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; }
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max, nbSeq >= 2048);
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return ERROR(GENERIC);
op += NCountSize; }
@ -917,7 +917,7 @@ static size_t writeSequences(U32* seed, frame_t* frame, seqStore_t* seqStorePtr,
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; }
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max, nbSeq >= 2048);
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return ERROR(GENERIC);
op += NCountSize; }

View File

@ -21,6 +21,7 @@
#include "mem.h" /* U32 */
#ifndef ZSTD_DLL_IMPORT
#include "zstd_internal.h" /* ZSTD_decodeSeqHeaders, ZSTD_blockHeaderSize, ZSTD_getcBlockSize, blockType_e, KB, MB */
#include "decompress/zstd_decompress_internal.h" /* ZSTD_DCtx struct */
#else
#define KB *(1 <<10)
#define MB *(1 <<20)
@ -134,6 +135,65 @@ static size_t local_ZSTD_decodeSeqHeaders(const void* src, size_t srcSize, void*
(void)src; (void)srcSize; (void)dst; (void)dstSize;
return ZSTD_decodeSeqHeaders(g_zdc, &nbSeq, buff2, g_cSize);
}
FORCE_NOINLINE size_t ZSTD_decodeLiteralsHeader(ZSTD_DCtx* dctx, void const* src, size_t srcSize)
{
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{
BYTE const* istart = (BYTE const*)src;
symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
if (litEncType == set_compressed) {
RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
{
size_t lhSize, litSize, litCSize;
U32 const lhlCode = (istart[0] >> 2) & 3;
U32 const lhc = MEM_readLE32(istart);
switch(lhlCode)
{
case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */
/* 2 - 2 - 10 - 10 */
lhSize = 3;
litSize = (lhc >> 4) & 0x3FF;
litCSize = (lhc >> 14) & 0x3FF;
break;
case 2:
/* 2 - 2 - 14 - 14 */
lhSize = 4;
litSize = (lhc >> 4) & 0x3FFF;
litCSize = lhc >> 18;
break;
case 3:
/* 2 - 2 - 18 - 18 */
lhSize = 5;
litSize = (lhc >> 4) & 0x3FFFF;
litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
break;
}
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
#ifndef HUF_FORCE_DECOMPRESS_X2
return HUF_readDTableX1_wksp_bmi2(
dctx->entropy.hufTable,
istart+lhSize, litCSize,
dctx->workspace, sizeof(dctx->workspace),
dctx->bmi2);
#else
return HUF_readDTableX2_wksp(
dctx->entropy.hufTable,
istart+lhSize, litCSize,
dctx->workspace, sizeof(dctx->workspace));
#endif
}
}
}
return 0;
}
static size_t local_ZSTD_decodeLiteralsHeader(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
(void)dst, (void)dstSize, (void)src, (void)srcSize;
return ZSTD_decodeLiteralsHeader(g_zdc, buff2, g_cSize);
}
#endif
static ZSTD_CStream* g_cstream= NULL;
@ -358,6 +418,9 @@ static int benchMem(unsigned benchNb,
case 13:
benchFunction = local_ZSTD_decompressContinue; benchName = "decompressContinue";
break;
case 30:
benchFunction = local_ZSTD_decodeLiteralsHeader; benchName = "decodeLiteralsHeader";
break;
case 31:
benchFunction = local_ZSTD_decodeLiteralsBlock; benchName = "decodeLiteralsBlock";
break;
@ -446,6 +509,8 @@ static int benchMem(unsigned benchNb,
case 13 :
g_cSize = ZSTD_compress(dstBuff2, dstBuffSize, src, srcSize, cLevel);
break;
case 30: /* ZSTD_decodeLiteralsHeader */
/* fall-through */
case 31: /* ZSTD_decodeLiteralsBlock : starts literals block in dstBuff2 */
{ size_t frameHeaderSize;
g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, cLevel);

View File

@ -95,7 +95,9 @@ FUZZ_TARGETS := \
simple_compress \
dictionary_loader \
raw_dictionary_round_trip \
dictionary_stream_round_trip
dictionary_stream_round_trip \
decompress_dstSize_tooSmall \
fse_read_ncount
all: $(FUZZ_TARGETS)
@ -180,6 +182,12 @@ zstd_frame_info: $(FUZZ_HEADERS) $(FUZZ_DECOMPRESS_OBJ) d_fuzz_zstd_frame_info.o
dictionary_loader: $(FUZZ_HEADERS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_dictionary_loader.o
$(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_dictionary_loader.o $(LIB_FUZZING_ENGINE) -o $@
decompress_dstSize_tooSmall: $(FUZZ_HEADERS) $(FUZZ_DECOMPRESS_OBJ) d_fuzz_decompress_dstSize_tooSmall.o
$(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_DECOMPRESS_OBJ) d_fuzz_decompress_dstSize_tooSmall.o $(LIB_FUZZING_ENGINE) -o $@
fse_read_ncount: $(FUZZ_HEADERS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_fse_read_ncount.o
$(CXX) $(FUZZ_TARGET_FLAGS) $(FUZZ_ROUND_TRIP_OBJ) rt_fuzz_fse_read_ncount.o $(LIB_FUZZING_ENGINE) -o $@
libregression.a: $(FUZZ_HEADERS) $(PRGDIR)/util.h $(PRGDIR)/util.c d_fuzz_regression_driver.o
$(AR) $(FUZZ_ARFLAGS) $@ d_fuzz_regression_driver.o

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2016-2020, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/**
* This fuzz target attempts to decompress a valid compressed frame into
 * an output buffer that is too small, to ensure we always get
* ZSTD_error_dstSize_tooSmall.
*/
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "zstd_errors.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
static ZSTD_CCtx *cctx = NULL;
static ZSTD_DCtx *dctx = NULL;
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
size_t rBufSize = FUZZ_dataProducer_uint32Range(producer, 0, size);
size = FUZZ_dataProducer_remainingBytes(producer);
/* Ensure the round-trip buffer is too small. */
if (rBufSize >= size) {
rBufSize = size > 0 ? size - 1 : 0;
}
size_t const cBufSize = ZSTD_compressBound(size);
if (!cctx) {
cctx = ZSTD_createCCtx();
FUZZ_ASSERT(cctx);
}
if (!dctx) {
dctx = ZSTD_createDCtx();
FUZZ_ASSERT(dctx);
}
void *cBuf = FUZZ_malloc(cBufSize);
void *rBuf = FUZZ_malloc(rBufSize);
size_t const cSize = ZSTD_compressCCtx(cctx, cBuf, cBufSize, src, size, 1);
FUZZ_ZASSERT(cSize);
size_t const rSize = ZSTD_decompressDCtx(dctx, rBuf, rBufSize, cBuf, cSize);
if (size == 0) {
FUZZ_ASSERT(rSize == 0);
} else {
FUZZ_ASSERT(ZSTD_isError(rSize));
FUZZ_ASSERT(ZSTD_getErrorCode(rSize) == ZSTD_error_dstSize_tooSmall);
}
free(cBuf);
free(rBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING
ZSTD_freeCCtx(cctx); cctx = NULL;
ZSTD_freeDCtx(dctx); dctx = NULL;
#endif
return 0;
}

View File

@ -0,0 +1,100 @@
/*
* Copyright (c) 2016-2020, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/**
* This fuzz target round trips the FSE normalized count with FSE_writeNCount()
 * and FSE_readNCount() to ensure that it can always round trip correctly.
*/
#define FSE_STATIC_LINKING_ONLY
#define ZSTD_STATIC_LINKING_ONLY
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "fuzz_helpers.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
#include "fse.h"
int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
{
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
/* Pick a random tableLog and maxSymbolValue */
unsigned const tableLog = FUZZ_dataProducer_uint32Range(producer, FSE_MIN_TABLELOG, FSE_MAX_TABLELOG);
unsigned const maxSymbolValue = FUZZ_dataProducer_uint32Range(producer, 0, 255);
unsigned remainingWeight = (1u << tableLog) - 1;
size_t dataSize;
BYTE data[512];
short ncount[256];
/* Randomly fill the normalized count */
memset(ncount, 0, sizeof(ncount));
{
unsigned s;
for (s = 0; s < maxSymbolValue && remainingWeight > 0; ++s) {
short n = (short)FUZZ_dataProducer_int32Range(producer, -1, remainingWeight);
ncount[s] = n;
if (n < 0) {
remainingWeight -= 1;
} else {
assert((unsigned)n <= remainingWeight);
remainingWeight -= n;
}
}
/* Ensure ncount[maxSymbolValue] != 0 and the sum is (1<<tableLog) */
ncount[maxSymbolValue] = remainingWeight + 1;
if (ncount[maxSymbolValue] == 1 && FUZZ_dataProducer_uint32Range(producer, 0, 1) == 1) {
ncount[maxSymbolValue] = -1;
}
}
/* Write the normalized count */
{
FUZZ_ASSERT(sizeof(data) >= FSE_NCountWriteBound(maxSymbolValue, tableLog));
dataSize = FSE_writeNCount(data, sizeof(data), ncount, maxSymbolValue, tableLog);
FUZZ_ZASSERT(dataSize);
}
/* Read & validate the normalized count */
{
short rtNcount[256];
unsigned rtMaxSymbolValue = 255;
unsigned rtTableLog;
/* Copy into a buffer with a random amount of random data at the end */
size_t const buffSize = (size_t)FUZZ_dataProducer_uint32Range(producer, dataSize, sizeof(data));
BYTE* const buff = FUZZ_malloc(buffSize);
size_t rtDataSize;
memcpy(buff, data, dataSize);
{
size_t b;
for (b = dataSize; b < buffSize; ++b) {
buff[b] = (BYTE)FUZZ_dataProducer_uint32Range(producer, 0, 255);
}
}
rtDataSize = FSE_readNCount(rtNcount, &rtMaxSymbolValue, &rtTableLog, buff, buffSize);
FUZZ_ZASSERT(rtDataSize);
FUZZ_ASSERT(rtDataSize == dataSize);
FUZZ_ASSERT(rtMaxSymbolValue == maxSymbolValue);
FUZZ_ASSERT(rtTableLog == tableLog);
{
unsigned s;
for (s = 0; s <= maxSymbolValue; ++s) {
FUZZ_ASSERT(ncount[s] == rtNcount[s]);
}
}
free(buff);
}
FUZZ_dataProducer_free(producer);
return 0;
}

View File

@ -59,6 +59,8 @@ TARGET_INFO = {
'dictionary_loader': TargetInfo(InputType.DICTIONARY_DATA),
'raw_dictionary_round_trip': TargetInfo(InputType.RAW_DATA),
'dictionary_stream_round_trip': TargetInfo(InputType.RAW_DATA),
'decompress_dstSize_tooSmall': TargetInfo(InputType.RAW_DATA),
'fse_read_ncount': TargetInfo(InputType.RAW_DATA),
}
TARGETS = list(TARGET_INFO.keys())
ALL_TARGETS = TARGETS + ['all']

View File

@ -66,6 +66,10 @@ size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer){
return producer->size;
}
int FUZZ_dataProducer_empty(FUZZ_dataProducer_t *producer) {
return producer->size == 0;
}
size_t FUZZ_dataProducer_contract(FUZZ_dataProducer_t *producer, size_t newSize)
{
newSize = newSize > producer->size ? producer->size : newSize;

View File

@ -49,6 +49,9 @@ int32_t FUZZ_dataProducer_int32Range(FUZZ_dataProducer_t *producer,
/* Returns the size of the remaining bytes of data in the producer */
size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer);
/* Returns true if the data producer is out of bytes */
int FUZZ_dataProducer_empty(FUZZ_dataProducer_t *producer);
/* Restricts the producer to only the last newSize bytes of data.
If newSize > current data size, nothing happens. Returns the number of bytes
the producer won't use anymore, after contracting. */

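A minimal sketch of FUZZ_dataProducer_contract() in use (an assumption, not code from this diff):

#include <stdint.h>
#include "fuzz_data_producer.h"

/* Hedged sketch: restrict the producer to the last size/2 bytes of the fuzz
 * input; the return value is the length of the leading prefix the producer
 * will no longer touch, which a target can then consume as raw payload. */
static size_t splitInput(const uint8_t* src, size_t size)
{
    FUZZ_dataProducer_t* const producer = FUZZ_dataProducer_create(src, size);
    size_t const prefixSize = FUZZ_dataProducer_contract(producer, size / 2);
    FUZZ_dataProducer_free(producer);
    return prefixSize;
}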
View File

@ -19,6 +19,7 @@
#include <stdio.h>
#include "fuzz_helpers.h"
#include "zstd.h"
#include "zstd_errors.h"
#include "zstd_helpers.h"
#include "fuzz_data_producer.h"
@ -42,7 +43,10 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
}
void *rBuf = FUZZ_malloc(bufSize);
ZSTD_compressCCtx(cctx, rBuf, bufSize, src, size, cLevel);
size_t const ret = ZSTD_compressCCtx(cctx, rBuf, bufSize, src, size, cLevel);
if (ZSTD_isError(ret)) {
FUZZ_ASSERT(ZSTD_getErrorCode(ret) == ZSTD_error_dstSize_tooSmall);
}
free(rBuf);
FUZZ_dataProducer_free(producer);
#ifndef STATEFUL_FUZZING

View File

@ -47,8 +47,12 @@ static size_t roundTripTest(void *result, size_t resultCapacity,
FUZZ_ZASSERT(cSize);
dSize = ZSTD_decompressDCtx(dctx, result, resultCapacity, compressed, cSize);
FUZZ_ZASSERT(dSize);
/* When superblock is enabled make sure we don't expand the block more than expected. */
if (targetCBlockSize != 0) {
/* When superblock is enabled make sure we don't expand the block more than expected.
* NOTE: This test is currently disabled because superblock mode can arbitrarily
* expand the block in the worst case. Once superblock mode has been improved we can
* re-enable this test.
*/
if (0 && targetCBlockSize != 0) {
size_t normalCSize;
FUZZ_ZASSERT(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 0));
normalCSize = ZSTD_compress2(cctx, compressed, compressedCapacity, src, srcSize);

View File

@ -22,18 +22,19 @@
#include "zstd.h"
#include "fuzz_data_producer.h"
static size_t const kBufSize = ZSTD_BLOCKSIZE_MAX;
static ZSTD_DStream *dstream = NULL;
static void* buf = NULL;
uint32_t seed;
static ZSTD_outBuffer makeOutBuffer(FUZZ_dataProducer_t *producer, uint32_t min)
static ZSTD_outBuffer makeOutBuffer(FUZZ_dataProducer_t *producer, void* buf, size_t bufSize)
{
ZSTD_outBuffer buffer = { buf, 0, 0 };
buffer.size = (FUZZ_dataProducer_uint32Range(producer, min, kBufSize));
FUZZ_ASSERT(buffer.size <= kBufSize);
if (FUZZ_dataProducer_empty(producer)) {
buffer.size = bufSize;
} else {
buffer.size = (FUZZ_dataProducer_uint32Range(producer, 0, bufSize));
}
FUZZ_ASSERT(buffer.size <= bufSize);
if (buffer.size == 0) {
buffer.dst = NULL;
@ -43,13 +44,16 @@ static ZSTD_outBuffer makeOutBuffer(FUZZ_dataProducer_t *producer, uint32_t min)
}
static ZSTD_inBuffer makeInBuffer(const uint8_t **src, size_t *size,
FUZZ_dataProducer_t *producer,
uint32_t min)
FUZZ_dataProducer_t *producer)
{
ZSTD_inBuffer buffer = { *src, 0, 0 };
FUZZ_ASSERT(*size > 0);
buffer.size = (FUZZ_dataProducer_uint32Range(producer, min, *size));
if (FUZZ_dataProducer_empty(producer)) {
buffer.size = *size;
} else {
buffer.size = (FUZZ_dataProducer_uint32Range(producer, 0, *size));
}
FUZZ_ASSERT(buffer.size <= *size);
*src += buffer.size;
*size -= buffer.size;
@ -66,18 +70,15 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
/* Give a random portion of src data to the producer, to use for
parameter generation. The rest will be used for (de)compression */
FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(src, size);
/* Guarantee forward progress by refusing to generate 2 zero sized
* buffers in a row. */
int prevInWasZero = 0;
int prevOutWasZero = 0;
int stableOutBuffer;
ZSTD_outBuffer out;
void* buf;
size_t bufSize;
size = FUZZ_dataProducer_reserveDataPrefix(producer);
bufSize = MAX(10 * size, ZSTD_BLOCKSIZE_MAX);
/* Allocate all buffers and contexts if not already allocated */
if (!buf) {
buf = FUZZ_malloc(kBufSize);
}
buf = FUZZ_malloc(bufSize);
if (!dstream) {
dstream = ZSTD_createDStream();
@ -90,18 +91,19 @@ int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size)
if (stableOutBuffer) {
FUZZ_ZASSERT(ZSTD_DCtx_setParameter(dstream, ZSTD_d_stableOutBuffer, 1));
out.dst = buf;
out.size = kBufSize;
out.size = bufSize;
out.pos = 0;
} else {
out = makeOutBuffer(producer, buf, bufSize);
}
while (size > 0) {
ZSTD_inBuffer in = makeInBuffer(&src, &size, producer, prevInWasZero ? 1 : 0);
prevInWasZero = in.size == 0;
ZSTD_inBuffer in = makeInBuffer(&src, &size, producer);
while (in.pos != in.size) {
if (!stableOutBuffer || prevOutWasZero || FUZZ_dataProducer_uint32Range(producer, 0, 100) == 55) {
out = makeOutBuffer(producer, prevOutWasZero ? 1 : 0);
if (out.pos == out.size) {
if (stableOutBuffer) goto error;
out = makeOutBuffer(producer, buf, bufSize);
}
prevOutWasZero = out.size == 0;
size_t const rc = ZSTD_decompressStream(dstream, &out, &in);
if (ZSTD_isError(rc)) goto error;
}
@ -112,5 +114,6 @@ error:
ZSTD_freeDStream(dstream); dstream = NULL;
#endif
FUZZ_dataProducer_free(producer);
free(buf);
return 0;
}

View File

@ -544,6 +544,45 @@ static int basicUnitTests(U32 const seed, double compressibility)
if (ZSTD_getErrorCode(r) != ZSTD_error_dstSize_tooSmall) goto _output_error; }
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : decompress with corrupted checksum : ", testNb++);
{ /* create compressed buffer with checksumming enabled */
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
if (!cctx) {
DISPLAY("Not enough memory, aborting\n");
testResult = 1;
goto _end;
}
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1) );
CHECK_VAR(cSize, ZSTD_compress2(cctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize) );
ZSTD_freeCCtx(cctx);
}
{ /* corrupt the checksum of the compressed buffer and attempt to decompress */
size_t r;
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
if (!dctx) {
DISPLAY("Not enough memory, aborting\n");
testResult = 1;
goto _end;
}
((char*)compressedBuffer)[cSize-1] += 1;
r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (!ZSTD_isError(r)) goto _output_error;
if (ZSTD_getErrorCode(r) != ZSTD_error_checksum_wrong) goto _output_error;
CHECK_Z(ZSTD_DCtx_setParameter(dctx, ZSTD_d_forceIgnoreChecksum, ZSTD_d_ignoreChecksum));
r = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize-1);
if (!ZSTD_isError(r)) goto _output_error; /* wrong checksum size should still throw error */
r = ZSTD_decompressDCtx(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (ZSTD_isError(r)) goto _output_error;
ZSTD_freeDCtx(dctx);
}
DISPLAYLEVEL(3, "OK \n");
DISPLAYLEVEL(3, "test%3i : ZSTD_decompressBound test with content size missing : ", testNb++);
{ /* create compressed buffer with content size missing */
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
@ -1573,11 +1612,11 @@ static int basicUnitTests(U32 const seed, double compressibility)
const void* const contentStart = (const char*)dict + flatdictSize;
size_t const target_nodict_cSize[22+1] = { 3840, 3770, 3870, 3830, 3770,
3770, 3770, 3770, 3750, 3750,
3740, 3670, 3670, 3660, 3660,
3742, 3670, 3670, 3660, 3660,
3660, 3660, 3660, 3660, 3660,
3660, 3660, 3660 };
size_t const target_wdict_cSize[22+1] = { 2830, 2890, 2890, 2820, 2940,
2950, 2950, 2920, 2900, 2890,
2950, 2950, 2921, 2900, 2891,
2910, 2910, 2910, 2770, 2760,
2750, 2750, 2750, 2750, 2750,
2750, 2750, 2750 };
@ -2744,7 +2783,7 @@ static int basicUnitTests(U32 const seed, double compressibility)
/* Calling FSE_normalizeCount() on a uniform distribution should not
* cause a division by zero.
*/
FSE_normalizeCount(norm, tableLog, count, nbSeq, maxSymbolValue);
FSE_normalizeCount(norm, tableLog, count, nbSeq, maxSymbolValue, /* useLowProbCount */ 1);
}
DISPLAYLEVEL(3, "OK \n");
#ifdef ZSTD_MULTITHREAD
@ -3053,7 +3092,7 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const
DISPLAYLEVEL(5, "fuzzer t%u: compress into too small buffer of size %u (missing %u bytes) \n",
testNb, (unsigned)tooSmallSize, (unsigned)missing);
{ size_t const errorCode = ZSTD_compressCCtx(ctx, dstBuffer, tooSmallSize, sampleBuffer, sampleSize, cLevel);
CHECK(!ZSTD_isError(errorCode), "ZSTD_compressCCtx should have failed ! (buffer too small : %u < %u)", (unsigned)tooSmallSize, (unsigned)cSize); }
CHECK(ZSTD_getErrorCode(errorCode) != ZSTD_error_dstSize_tooSmall, "ZSTD_compressCCtx should have failed ! (buffer too small : %u < %u)", (unsigned)tooSmallSize, (unsigned)cSize); }
{ unsigned endCheck; memcpy(&endCheck, dstBuffer+tooSmallSize, sizeof(endCheck));
CHECK(endCheck != endMark, "ZSTD_compressCCtx : dst buffer overflow (check.%08X != %08X.mark)", endCheck, endMark); }
} }
@ -3100,7 +3139,7 @@ static int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, U32 const
static const BYTE token = 0xA9;
dstBuffer[tooSmallSize] = token;
{ size_t const errorCode = ZSTD_decompress(dstBuffer, tooSmallSize, cBuffer, cSize);
CHECK(!ZSTD_isError(errorCode), "ZSTD_decompress should have failed : %u > %u (dst buffer too small)", (unsigned)errorCode, (unsigned)tooSmallSize); }
CHECK(ZSTD_getErrorCode(errorCode) != ZSTD_error_dstSize_tooSmall, "ZSTD_decompress should have failed : %u > %u (dst buffer too small)", (unsigned)errorCode, (unsigned)tooSmallSize); }
CHECK(dstBuffer[tooSmallSize] != token, "ZSTD_decompress : dst buffer overflow");
}

View File

@ -260,6 +260,13 @@ zstd tmp -c --compress-literals --fast=1 | zstd -t
zstd tmp -c --compress-literals -19 | zstd -t
zstd -b --fast=1 -i0e1 tmp --compress-literals
zstd -b --fast=1 -i0e1 tmp --no-compress-literals
println "test: --no-check for decompression"
zstd -f tmp -o tmp_corrupt.zst --check
zstd -f tmp -o tmp.zst --no-check
printf '\xDE\xAD\xBE\xEF' | dd of=tmp_corrupt.zst bs=1 seek=$(($(wc -c < "tmp_corrupt.zst") - 4)) count=4 conv=notrunc # corrupt the checksum in tmp_corrupt.zst
zstd -d -f tmp_corrupt.zst --no-check
zstd -d -f tmp_corrupt.zst --check --no-check # final flag overrides
zstd -d -f tmp.zst --no-check
println "\n===> zstdgrep tests"
ln -sf "$ZSTD_BIN" zstdcat
@ -361,7 +368,7 @@ zstd tmp1.zst tmp2.zst -o "$INTOVOID" -f
zstd -d tmp1.zst tmp2.zst -o tmp
touch tmpexists
zstd tmp1 tmp2 -f -o tmpexists
zstd tmp1 tmp2 -o tmpexists && die "should have refused to overwrite"
zstd tmp1 tmp2 -q -o tmpexists && die "should have refused to overwrite"
println gooder > tmp_rm1
println boi > tmp_rm2
println worldly > tmp_rm3

File diff suppressed because it is too large