updated huff0

parent a5c2c08c68
commit 516ba88022

lib/huff0.c: 185 lines changed
@@ -1,6 +1,6 @@
 /* ******************************************************************
 Huff0 : Huffman coder, part of New Generation Entropy library
-Copyright (C) 2013-2015, Yann Collet.
+Copyright (C) 2013-2016, Yann Collet.

 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

@@ -103,8 +103,7 @@ typedef struct nodeElt_s {
 } nodeElt;

 /*! HUF_writeCTable() :
-@dst : destination buffer
-@CTable : huffman tree to save, using huff0 representation
+`CTable` : huffman tree to save, using huff0 representation.
 @return : size of saved CTable */
 size_t HUF_writeCTable (void* dst, size_t maxDstSize,
 const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
@@ -181,66 +180,58 @@ size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, si
 BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
 U32 tableLog = 0;
-size_t iSize;
+size_t readSize;
 U32 nbSymbols = 0;
-U32 n;
-U32 nextRankStart;
 //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... */

 /* get symbol weights */
-iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE+1, rankVal, &nbSymbols, &tableLog, src, srcSize);
-if (HUF_isError(iSize)) return iSize;
+readSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE+1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+if (HUF_isError(readSize)) return readSize;

 /* check result */
 if (tableLog > HUF_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
 if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall);

 /* Prepare base value per rank */
-nextRankStart = 0;
+{ U32 n, nextRankStart = 0;
 for (n=1; n<=tableLog; n++) {
 U32 current = nextRankStart;
 nextRankStart += (rankVal[n] << (n-1));
 rankVal[n] = current;
-}
+} }

 /* fill nbBits */
-for (n=0; n<nbSymbols; n++) {
+{ U32 n; for (n=0; n<nbSymbols; n++) {
 const U32 w = huffWeight[n];
 CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
-}
+}}

 /* fill val */
-{
-U16 nbPerRank[HUF_MAX_TABLELOG+1] = {0};
+{ U16 nbPerRank[HUF_MAX_TABLELOG+1] = {0};
 U16 valPerRank[HUF_MAX_TABLELOG+1] = {0};
-for (n=0; n<nbSymbols; n++)
-nbPerRank[CTable[n].nbBits]++;
-{
+{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
 /* determine stating value per rank */
-U16 min = 0;
-for (n=HUF_MAX_TABLELOG; n>0; n--) {
+{ U16 min = 0;
+U32 n; for (n=HUF_MAX_TABLELOG; n>0; n--) {
 valPerRank[n] = min; /* get starting value within each rank */
 min += nbPerRank[n];
 min >>= 1;
 } }
-for (n=0; n<=maxSymbolValue; n++)
-CTable[n].val = valPerRank[CTable[n].nbBits]++; /* assign value within rank, symbol order */
+/* assign value within rank, symbol order */
+{ U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
 }

-return iSize;
+return readSize;
 }


 static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
 {
-int totalCost = 0;
 const U32 largestBits = huffNode[lastNonNull].nbBits;
-/* early exit : all is fine */
-if (largestBits <= maxNbBits) return largestBits;
+if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */

 /* there are several too large elements (at least >= 2) */
-{
+{ int totalCost = 0;
 const U32 baseCost = 1 << (largestBits - maxNbBits);
 U32 n = lastNonNull;

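As a reading aid for the HUF_readCTable() hunk above: the saved `CTable` stores one weight per symbol, and the table is rebuilt with nbBits = tableLog + 1 - w (a weight of 0 meaning the symbol does not appear). A toy illustration of that mapping follows; the weights are made up for demonstration (chosen so that sum(2^(w-1)) equals 2^tableLog) and are not taken from the library.

#include <stdio.h>

int main(void)
{
    /* hypothetical weights for 4 symbols : 2^(3-1)+2^(2-1)+2^(1-1)+2^(1-1) = 8 = 2^tableLog */
    unsigned const tableLog = 3;
    unsigned const weights[4] = { 3, 2, 1, 1 };
    unsigned n;
    for (n = 0; n < 4; n++) {
        unsigned const nbBits = tableLog + 1 - weights[n];   /* same formula as CTable[n].nbBits above */
        printf("symbol %u : weight %u -> %u bits\n", n, weights[n], nbBits);
    }
    return 0;   /* prints code lengths 1, 2, 3, 3 : a valid Huffman code */
}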
@@ -249,25 +240,24 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
 huffNode[n].nbBits = (BYTE)maxNbBits;
 n --;
 } /* n stops at huffNode[n].nbBits <= maxNbBits */
-while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using (maxNbBits-1) */
+while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */

 /* renorm totalCost */
 totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */

 /* repay normalized cost */
-{
-const U32 noSymbol = 0xF0F0F0F0;
+{ U32 const noSymbol = 0xF0F0F0F0;
 U32 rankLast[HUF_MAX_TABLELOG+1];
-U32 currentNbBits = maxNbBits;
 int pos;

 /* Get pos of last (smallest) symbol per rank */
 memset(rankLast, 0xF0, sizeof(rankLast));
+{ U32 currentNbBits = maxNbBits;
 for (pos=n ; pos >= 0; pos--) {
 if (huffNode[pos].nbBits >= currentNbBits) continue;
 currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
 rankLast[maxNbBits-currentNbBits] = pos;
-}
+} }

 while (totalCost > 0) {
 U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
@@ -276,9 +266,8 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
 U32 lowPos = rankLast[nBitsToDecrease-1];
 if (highPos == noSymbol) continue;
 if (lowPos == noSymbol) break;
-{
-U32 highTotal = huffNode[highPos].count;
-U32 lowTotal = 2 * huffNode[lowPos].count;
+{ U32 const highTotal = huffNode[highPos].count;
+U32 const lowTotal = 2 * huffNode[lowPos].count;
 if (highTotal <= lowTotal) break;
 } }
 /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
@@ -294,7 +283,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
 rankLast[nBitsToDecrease]--;
 if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
 rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
-} }
+} } /* while (totalCost > 0) */

 while (totalCost < 0) { /* Sometimes, cost correction overshoot */
 if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
@@ -307,7 +296,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
 huffNode[ rankLast[1] + 1 ].nbBits--;
 rankLast[1]++;
 totalCost ++;
-} } }
+} } } /* there are several too large elements (at least >= 2) */

 return maxNbBits;
 }
@@ -331,8 +320,8 @@ static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
 for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
 for (n=0; n<32; n++) rank[n].current = rank[n].base;
 for (n=0; n<=maxSymbolValue; n++) {
-U32 c = count[n];
-U32 r = BIT_highbit32(c+1) + 1;
+U32 const c = count[n];
+U32 const r = BIT_highbit32(c+1) + 1;
 U32 pos = rank[r].current++;
 while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--;
 huffNode[pos].count = c;
@@ -389,21 +378,18 @@ size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U3
 maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);

 /* fill result into tree (val, nbBits) */
-{
-U16 nbPerRank[HUF_MAX_TABLELOG+1] = {0};
+{ U16 nbPerRank[HUF_MAX_TABLELOG+1] = {0};
 U16 valPerRank[HUF_MAX_TABLELOG+1] = {0};
 if (maxNbBits > HUF_MAX_TABLELOG) return ERROR(GENERIC); /* check fit into table */
 for (n=0; n<=nonNullRank; n++)
 nbPerRank[huffNode[n].nbBits]++;
-{
 /* determine stating value per rank */
-U16 min = 0;
+{ U16 min = 0;
 for (n=maxNbBits; n>0; n--) {
 valPerRank[n] = min; /* get starting value within each rank */
 min += nbPerRank[n];
 min >>= 1;
-}
-}
+} }
 for (n=0; n<=maxSymbolValue; n++)
 tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
 for (n=0; n<=maxSymbolValue; n++)
@@ -432,17 +418,16 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si
 {
 const BYTE* ip = (const BYTE*) src;
 BYTE* const ostart = (BYTE*)dst;
-BYTE* op = ostart;
 BYTE* const oend = ostart + dstSize;
+BYTE* op = ostart;
 size_t n;
 const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
-size_t errorCode;
 BIT_CStream_t bitC;

 /* init */
 if (dstSize < 8) return 0; /* not enough space to compress */
-errorCode = BIT_initCStream(&bitC, op, oend-op);
-if (HUF_isError(errorCode)) return 0;
+{ size_t const errorCode = BIT_initCStream(&bitC, op, oend-op);
+if (HUF_isError(errorCode)) return 0; }

 n = srcSize & ~3; /* join to mod 4 */
 switch (srcSize & 3)
@@ -475,12 +460,12 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si
 size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
 {
 size_t segmentSize = (srcSize+3)/4; /* first 3 segments */
-size_t errorCode;
 const BYTE* ip = (const BYTE*) src;
 const BYTE* const iend = ip + srcSize;
 BYTE* const ostart = (BYTE*) dst;
-BYTE* op = ostart;
 BYTE* const oend = ostart + dstSize;
+BYTE* op = ostart;
+size_t errorCode;

 if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
 if (srcSize < 12) return 0; /* no saving possible : too small input */
@@ -523,8 +508,8 @@ static size_t HUF_compress_internal (
 unsigned singleStream)
 {
 BYTE* const ostart = (BYTE*)dst;
-BYTE* op = ostart;
 BYTE* const oend = ostart + dstSize;
+BYTE* op = ostart;

 U32 count[HUF_MAX_SYMBOL_VALUE+1];
 HUF_CElt CTable[HUF_MAX_SYMBOL_VALUE+1];
@@ -602,9 +587,9 @@ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* doubl

 typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;

-/*! HUF_readStats
-Read compact Huffman tree, saved by HUF_writeCTable
-@huffWeight : destination buffer
+/*! HUF_readStats() :
+Read compact Huffman tree, saved by HUF_writeCTable().
+`huffWeight` is destination buffer.
 @return : size read from `src`
 */
 static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
@@ -616,13 +601,12 @@ static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
 const BYTE* ip = (const BYTE*) src;
 size_t iSize = ip[0];
 size_t oSize;
-U32 n;

 //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */

 if (iSize >= 128) { /* special header */
 if (iSize >= (242)) { /* RLE */
-static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
 oSize = l[iSize-242];
 memset(huffWeight, 1, hwSize);
 iSize = 0;
@@ -633,10 +617,11 @@ static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
 if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
 if (oSize >= hwSize) return ERROR(corruption_detected);
 ip += 1;
+{ U32 n;
 for (n=0; n<oSize; n+=2) {
 huffWeight[n] = ip[n/2] >> 4;
 huffWeight[n+1] = ip[n/2] & 15;
-} } }
+} } } }
 else { /* header compressed with FSE (normal case) */
 if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
 oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
@@ -646,20 +631,20 @@ static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
 /* collect weight stats */
 memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
 weightTotal = 0;
-for (n=0; n<oSize; n++) {
+{ U32 n; for (n=0; n<oSize; n++) {
 if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
 rankStats[huffWeight[n]]++;
 weightTotal += (1 << huffWeight[n]) >> 1;
-}
+}}

 /* get last non-null symbol weight (implied, total must be 2^n) */
 tableLog = BIT_highbit32(weightTotal) + 1;
 if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
-{ /* determine last weight */
-U32 total = 1 << tableLog;
-U32 rest = total - weightTotal;
-U32 verif = 1 << BIT_highbit32(rest);
-U32 lastWeight = BIT_highbit32(rest) + 1;
+/* determine last weight */
+{ U32 const total = 1 << tableLog;
+U32 const rest = total - weightTotal;
+U32 const verif = 1 << BIT_highbit32(rest);
+U32 const lastWeight = BIT_highbit32(rest) + 1;
 if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
 huffWeight[oSize] = (BYTE)lastWeight;
 rankStats[lastWeight]++;
@@ -724,6 +709,7 @@ size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
 return iSize;
 }

+
 static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
 {
 const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
@@ -773,13 +759,13 @@ size_t HUF_decompress1X2_usingDTable(
 {
 BYTE* op = (BYTE*)dst;
 BYTE* const oend = op + dstSize;
-size_t errorCode;
 const U32 dtLog = DTable[0];
 const void* dtPtr = DTable;
 const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr)+1;
 BIT_DStream_t bitD;
-errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
-if (HUF_isError(errorCode)) return errorCode;
+{ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
+if (HUF_isError(errorCode)) return errorCode; }

 HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);

@@ -793,9 +779,8 @@ size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
 {
 HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
 const BYTE* ip = (const BYTE*) cSrc;
-size_t errorCode;

-errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+size_t const errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
 if (HUF_isError(errorCode)) return errorCode;
 if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
 ip += errorCode;
@@ -812,8 +797,8 @@ size_t HUF_decompress4X2_usingDTable(
 {
 /* Check */
 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
-{
-const BYTE* const istart = (const BYTE*) cSrc;
+{ const BYTE* const istart = (const BYTE*) cSrc;
 BYTE* const ostart = (BYTE*) dst;
 BYTE* const oend = ostart + dstSize;
 const void* const dtPtr = DTable;
@@ -903,9 +888,8 @@ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
 {
 HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
 const BYTE* ip = (const BYTE*) cSrc;
-size_t errorCode;

-errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+size_t const errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
 if (HUF_isError(errorCode)) return errorCode;
 if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
 ip += errorCode;
@@ -926,7 +910,6 @@ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 co
 {
 HUF_DEltX4 DElt;
 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
-U32 s;

 /* get pre-calculated rankVal */
 memcpy(rankVal, rankValOrigin, sizeof(rankVal));
@@ -942,7 +925,7 @@ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 co
 }

 /* fill DTable */
-for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
+{ U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
 const U32 symbol = sortedSymbols[s].symbol;
 const U32 weight = sortedSymbols[s].weight;
 const U32 nbBits = nbBitsBaseline - weight;
@@ -957,7 +940,7 @@ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 co
 do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */

 rankVal[weight] += length;
-}
+}}
 }

 typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
@@ -992,16 +975,14 @@ static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
 sortedList+sortedRank, sortedListSize-sortedRank,
 nbBitsBaseline, symbol);
 } else {
-U32 i;
-const U32 end = start + length;
 HUF_DEltX4 DElt;

 MEM_writeLE16(&(DElt.sequence), symbol);
 DElt.nbBits = (BYTE)(nbBits);
 DElt.length = 1;
-for (i = start; i < end; i++)
-DTable[i] = DElt;
-}
+{ U32 u;
+const U32 end = start + length;
+for (u = start; u < end; u++) DTable[u] = DElt;
+} }
 rankVal[weight] += length;
 }
 }
@@ -1034,8 +1015,7 @@ size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
 for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */

 /* Get start index of each weight */
-{
-U32 w, nextRankStart = 0;
+{ U32 w, nextRankStart = 0;
 for (w=1; w<=maxW; w++) {
 U32 current = nextRankStart;
 nextRankStart += rankStats[w];
@@ -1046,8 +1026,7 @@ size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
 }

 /* sort symbols by weight */
-{
-U32 s;
+{ U32 s;
 for (s=0; s<nbSymbols; s++) {
 U32 w = weightList[s];
 U32 r = rankStart[w]++;
@@ -1058,8 +1037,7 @@ size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
 }

 /* Build rankVal */
-{
-const U32 minBits = tableLog+1 - maxW;
+{ const U32 minBits = tableLog+1 - maxW;
 U32 nextRankVal = 0;
 U32 w, consumed;
 const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
@@ -1156,14 +1134,13 @@ size_t HUF_decompress1X4_usingDTable(
 const U32 dtLog = DTable[0];
 const void* const dtPtr = DTable;
 const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1;
-size_t errorCode;

 /* Init */
 BIT_DStream_t bitD;
-errorCode = BIT_initDStream(&bitD, istart, cSrcSize);
-if (HUF_isError(errorCode)) return errorCode;
+{ size_t const errorCode = BIT_initDStream(&bitD, istart, cSrcSize);
+if (HUF_isError(errorCode)) return errorCode; }

-/* finish bitStreams one by one */
+/* decode */
 HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);

 /* check */
@@ -1178,7 +1155,7 @@ size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cS
 HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
 const BYTE* ip = (const BYTE*) cSrc;

-size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
+size_t const hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
 if (HUF_isError(hSize)) return hSize;
 if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
 ip += hSize;
@@ -1194,8 +1171,7 @@ size_t HUF_decompress4X4_usingDTable(
 {
 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */

-{
-const BYTE* const istart = (const BYTE*) cSrc;
+{ const BYTE* const istart = (const BYTE*) cSrc;
 BYTE* const ostart = (BYTE*) dst;
 BYTE* const oend = ostart + dstSize;
 const void* const dtPtr = DTable;
@@ -1385,8 +1361,7 @@ size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
 for (maxW = tableLog; maxW && rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */

 /* Get start index of each weight */
-{
-U32 w, nextRankStart = 0;
+{ U32 w, nextRankStart = 0;
 for (w=1; w<=maxW; w++) {
 U32 current = nextRankStart;
 nextRankStart += rankStats[w];
@@ -1397,8 +1372,7 @@ size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
 }

 /* sort symbols by weight */
-{
-U32 s;
+{ U32 s;
 for (s=0; s<nbSymbols; s++) {
 U32 w = weightList[s];
 U32 r = rankStart[w]++;
@@ -1409,8 +1383,7 @@ size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
 }

 /* Build rankVal */
-{
-const U32 minBits = tableLog+1 - maxW;
+{ const U32 minBits = tableLog+1 - maxW;
 U32 nextRankVal = 0;
 U32 w, consumed;
 const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
@@ -1427,8 +1400,7 @@ size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
 } } }

 /* fill tables */
-{
-void* ddPtr = DTable+1;
+{ void* ddPtr = DTable+1;
 HUF_DDescX6* DDescription = (HUF_DDescX6*)ddPtr;
 void* dsPtr = DTable + 1 + ((size_t)1<<(memLog-1));
 HUF_DSeqX6* DSequence = (HUF_DSeqX6*)dsPtr;
@@ -1563,8 +1535,7 @@ size_t HUF_decompress4X6_usingDTable(
 /* Check */
 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */

-{
-const BYTE* const istart = (const BYTE*) cSrc;
+{ const BYTE* const istart = (const BYTE*) cSrc;
 BYTE* const ostart = (BYTE*) dst;
 BYTE* const oend = ostart + dstSize;

@@ -1659,7 +1630,7 @@ size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cS
 HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG);
 const BYTE* ip = (const BYTE*) cSrc;

-size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);
+size_t const hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);
 if (HUF_isError(hSize)) return hSize;
 if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
 ip += hSize;
lib/huff0.h: 18 lines changed
@@ -48,24 +48,24 @@ extern "C" {
 /* ****************************************
 * Huff0 simple functions
 ******************************************/
-size_t HUF_compress(void* dst, size_t maxDstSize,
+size_t HUF_compress(void* dst, size_t dstCapacity,
 const void* src, size_t srcSize);
 size_t HUF_decompress(void* dst, size_t dstSize,
 const void* cSrc, size_t cSrcSize);
-/*!
+/*
 HUF_compress() :
 Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
-'dst' buffer must be already allocated. Compression runs faster if maxDstSize >= HUF_compressBound(srcSize).
+'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= HUF_compressBound(srcSize).
 Note : srcSize must be <= 128 KB
-@return : size of compressed data (<= maxDstSize)
+@return : size of compressed data (<= dstCapacity)
 Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
-if return == 1, srcData is a single repeated byte symbol (RLE compression)
+if return == 1, srcData is a single repeated byte symbol (RLE compression).
 if HUF_isError(return), compression failed (more details using HUF_getErrorName())

 HUF_decompress() :
 Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
 into already allocated destination buffer 'dst', of size 'dstSize'.
-@dstSize : must be the **exact** size of original (uncompressed) data.
+`dstSize` : must be the **exact** size of original (uncompressed) data.
 Note : in contrast with FSE, HUF_decompress can regenerate
 RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
 because it knows size to regenerate.
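For reference, a minimal round-trip through the simple API documented above could look like the following sketch. It assumes huff0.h with the Huff0/FSE sources linked in; the sample string is made up, and error handling follows the doc comment (0 = not compressible, HUF_isError() for failures). HUF_compressBound(), used for the destination size, is declared in the Tool functions block shown just after this note.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "huff0.h"

int main(void)
{
    const char src[] = "aaaaaabbbbcccdde aaaaaabbbbcccdde aaaaaabbbbcccdde";
    size_t const srcSize = sizeof(src);                     /* must be <= 128 KB */
    size_t const dstCapacity = HUF_compressBound(srcSize);  /* worst-case compressed size */
    void* const cBuff = malloc(dstCapacity);
    void* const rBuff = malloc(srcSize);
    if (!cBuff || !rBuff) return 1;

    {   size_t const cSize = HUF_compress(cBuff, dstCapacity, src, srcSize);
        if (HUF_isError(cSize)) { printf("compression error : %s\n", HUF_getErrorName(cSize)); return 1; }
        if (cSize == 0) { printf("not compressible : store src as-is\n"); return 0; }

        /* dstSize must be the exact original size; RLE (cSize==1) is regenerated too */
        {   size_t const rSize = HUF_decompress(rBuff, srcSize, cBuff, cSize);
            if (HUF_isError(rSize)) { printf("decompression error : %s\n", HUF_getErrorName(rSize)); return 1; }
            printf("round-trip %s : %u -> %u -> %u bytes\n",
                   memcmp(src, rBuff, srcSize) ? "FAILED" : "ok",
                   (unsigned)srcSize, (unsigned)cSize, (unsigned)rSize);
    }   }
    free(cBuff); free(rBuff);
    return 0;
}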
@@ -77,11 +77,11 @@ HUF_decompress():
 /* ****************************************
 * Tool functions
 ******************************************/
-size_t HUF_compressBound(size_t size); /* maximum compressed size */
+size_t HUF_compressBound(size_t size); /**< maximum compressed size */

 /* Error Management */
-unsigned HUF_isError(size_t code); /* tells if a return value is an error code */
-const char* HUF_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
+const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */


 /* ****************************************
@@ -85,7 +85,7 @@ HUF_compress() does the following:
 1. count symbol occurrence from source[] into table count[] using FSE_count()
 2. build Huffman table from count using HUF_buildCTable()
 3. save Huffman table to memory buffer using HUF_writeCTable()
-4. encode the data stream using HUF_compress_usingCTable()
+4. encode the data stream using HUF_compress4X_usingCTable()

 The following API allows targeting specific sub-functions for advanced tasks.
 For example, it's possible to compress several blocks using the same 'CTable',
@@ -95,7 +95,7 @@ or to save and regenerate 'CTable' using external methods.
 typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
 size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);
 size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
-size_t HUF_compress4X_into4Segments(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);


 /*!
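To make the four-step sequence above concrete, here is a hedged sketch modeled on HUF_compress_internal() from the huff0.c hunks earlier in this commit. It assumes it is compiled where the internal definitions are visible (complete HUF_CElt, BYTE, HUF_MAX_SYMBOL_VALUE, HUF_MAX_TABLELOG, as inside huff0.c itself), that FSE_count() takes (count, &maxSymbolValue, src, srcSize), and that HUF_buildCTable() returns the largest code length actually used; treat those signatures and return conventions as assumptions rather than guarantees of this exact revision.

#include "fse.h"     /* FSE_count(), FSE_isError() : assumed signatures */
#include "huff0.h"

/* Sketch of steps 1-4 : count, build CTable, write CTable header, encode with it.
   Returns the total compressed size, 0 if not handled here, or an error code. */
static size_t exampleCompressWithCTable(void* dst, size_t dstSize,
                                        const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    unsigned count[HUF_MAX_SYMBOL_VALUE+1];
    HUF_CElt CTable[HUF_MAX_SYMBOL_VALUE+1];   /* needs the complete type (assumption above) */
    unsigned maxSymbolValue = HUF_MAX_SYMBOL_VALUE;
    unsigned huffLog = HUF_MAX_TABLELOG;

    /* 1. count symbol occurrences */
    {   size_t const largest = FSE_count(count, &maxSymbolValue, src, srcSize);
        if (FSE_isError(largest)) return largest;
        if (largest == srcSize) return 0;   /* single repeated symbol : better served by RLE, not handled here */
    }

    /* 2. build the Huffman table; the return value (assumed : largest nbBits used) becomes huffLog */
    {   size_t const maxBits = HUF_buildCTable(CTable, count, maxSymbolValue, huffLog);
        if (HUF_isError(maxBits)) return maxBits;
        huffLog = (unsigned)maxBits;
    }

    /* 3. save the table in front of the compressed streams */
    {   size_t const hSize = HUF_writeCTable(op, (size_t)(oend-op), CTable, maxSymbolValue, huffLog);
        if (HUF_isError(hSize)) return hSize;
        op += hSize;
    }

    /* 4. encode the data itself (4 streams) */
    {   size_t const cSize = HUF_compress4X_usingCTable(op, (size_t)(oend-op), src, srcSize, CTable);
        if (HUF_isError(cSize) || (cSize == 0)) return cSize;   /* 0 : not compressible / not enough room */
        op += cSize;
    }
    return (size_t)(op - ostart);
}

The block-scoped declarations mirror the style this commit moves huff0.c toward.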