introduced LZ4HC_compress_hashChain
This commit is contained in:
parent 3f430daf7a
commit c1ef7a177f
lib/lz4hc.c (30 changed lines)
lib/lz4hc.h
lib/lz4hc.c
@@ -305,8 +305,9 @@ FORCE_INLINE int LZ4HC_encodeSequence (
     return 0;
 }
 
+#include "lz4opt.h"
 
-static int LZ4HC_compress_generic (
+static int LZ4HC_compress_hashChain (
     LZ4HC_CCtx_internal* const ctx,
     const char* const source,
     char* const dest,
@@ -336,8 +337,6 @@ static int LZ4HC_compress_generic (
     const BYTE* ref0;
 
     /* init */
-    if (compressionLevel > LZ4HC_MAX_CLEVEL) compressionLevel = LZ4HC_MAX_CLEVEL;
-    if (compressionLevel < 1) compressionLevel = LZ4HC_DEFAULT_CLEVEL;
     maxNbAttempts = 1 << (compressionLevel-1);
     ctx->end += inputSize;
 
@@ -490,6 +489,31 @@ _Search3:
 }
 
 
+static int LZ4HC_compress_generic (
+    LZ4HC_CCtx_internal* const ctx,
+    const char* const source,
+    char* const dest,
+    int const inputSize,
+    int const maxOutputSize,
+    int compressionLevel,
+    limitedOutput_directive limit
+    )
+{
+    if (compressionLevel < 1) compressionLevel = LZ4HC_DEFAULT_CLEVEL;
+    if (compressionLevel > 16) {
+        switch (compressionLevel) {
+            case 17: ctx->searchNum = 64; return LZ4HC_compress_optimal(ctx, source, dest, inputSize, maxOutputSize, limit, 0, 64, 0);
+            case 18: ctx->searchNum = 256; return LZ4HC_compress_optimal(ctx, source, dest, inputSize, maxOutputSize, limit, 0, 256, 0);
+            case 19: ctx->searchNum = 64; return LZ4HC_compress_optimal(ctx, source, dest, inputSize, maxOutputSize, limit, 1, 64, 0);
+            case 20:
+            default: ctx->searchNum = 256; return LZ4HC_compress_optimal(ctx, source, dest, inputSize, maxOutputSize, limit, 1, 256, 0);
+        }
+    }
+
+    return LZ4HC_compress_hashChain(ctx, source, dest, inputSize, maxOutputSize, compressionLevel, limit);
+}
+
+
 int LZ4_sizeofStateHC(void) { return sizeof(LZ4_streamHC_t); }
 
 int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel)
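The net effect of this hunk is that LZ4HC_compress_generic becomes a thin dispatcher: levels 1-16 still go through the renamed hash-chain match finder LZ4HC_compress_hashChain, while levels 17-20 set ctx->searchNum and hand off to the optimal parser LZ4HC_compress_optimal. The caller below is an illustrative sketch, not part of the patch: it exercises both paths through the public LZ4_compress_HC() entry point and assumes a build of this branch where levels above 16 are accepted; the payload string and buffer handling are placeholders.

/* Illustrative caller, not part of this commit: level 9 exercises the
 * hash-chain path, level 17 the optimal-parser path (patched branch only). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"    /* LZ4_compressBound() */
#include "lz4hc.h"  /* LZ4_compress_HC() */

int main(void)
{
    const char* src = "a compressible example payload, a compressible example payload";
    int const srcSize = (int)strlen(src);
    int const dstCapacity = LZ4_compressBound(srcSize);
    char* const dst = (char*)malloc((size_t)dstCapacity);
    if (dst == NULL) return 1;

    /* level <= 16 : routed to LZ4HC_compress_hashChain */
    int const hcSize  = LZ4_compress_HC(src, dst, srcSize, dstCapacity, 9);
    /* level > 16 : routed to LZ4HC_compress_optimal via the new dispatcher */
    int const optSize = LZ4_compress_HC(src, dst, srcSize, dstCapacity, 17);

    printf("level 9  (hash chain) : %d bytes\n", hcSize);
    printf("level 17 (optimal)    : %d bytes\n", optSize);
    free(dst);
    return (hcSize > 0 && optSize > 0) ? 0 : 1;
}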
lib/lz4hc.h
@@ -46,7 +46,7 @@ extern "C" {
 /* --- Useful constants --- */
 #define LZ4HC_MIN_CLEVEL 3
 #define LZ4HC_DEFAULT_CLEVEL 9
-#define LZ4HC_MAX_CLEVEL 16
+#define LZ4HC_MAX_CLEVEL 20
 
 
 /*-************************************
@@ -58,7 +58,7 @@ extern "C" {
 * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
 * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
 * `compressionLevel` : Recommended values are between 4 and 9, although any value between 1 and LZ4HC_MAX_CLEVEL will work.
-*                      Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+*                      Values >LZ4HC_MAX_CLEVEL behave the same as LZ4HC_MAX_CLEVEL.
 * @return : the number of bytes written into 'dst'
 *           or 0 if compression fails.
 */
@@ -153,6 +153,7 @@ typedef struct
     uint32_t dictLimit;       /* below that point, need extDict */
     uint32_t lowLimit;        /* below that point, no more dict */
     uint32_t nextToUpdate;    /* index from which to continue dictionary update */
+    uint32_t searchNum;       /* only for optimal parser */
     uint32_t compressionLevel;
 } LZ4HC_CCtx_internal;
 
@@ -169,12 +170,13 @@ typedef struct
     unsigned int dictLimit;       /* below that point, need extDict */
     unsigned int lowLimit;        /* below that point, no more dict */
     unsigned int nextToUpdate;    /* index from which to continue dictionary update */
+    unsigned int searchNum;       /* only for optimal parser */
     unsigned int compressionLevel;
 } LZ4HC_CCtx_internal;
 
 #endif
 
-#define LZ4_STREAMHCSIZE 262192
+#define LZ4_STREAMHCSIZE 262200
 #define LZ4_STREAMHCSIZE_SIZET (LZ4_STREAMHCSIZE / sizeof(size_t))
 union LZ4_streamHC_u {
     size_t table[LZ4_STREAMHCSIZE_SIZET];
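LZ4_STREAMHCSIZE moves from 262192 to 262200 because the new searchNum field enlarges LZ4HC_CCtx_internal, and the opaque LZ4_streamHC_t union reserved for static allocation must remain at least as large as the real context (while staying a multiple of sizeof(size_t) for the table[] sizing). A minimal compile-time guard for that invariant is sketched below; it is not part of lz4, and the typedef name is invented for this example.

/* Sketch only (not lz4 code): fails to compile if LZ4HC_CCtx_internal ever
 * outgrows the opaque LZ4_streamHC_t reservation, which is the invariant
 * that forces LZ4_STREAMHCSIZE to grow once searchNum is added. */
#include "lz4hc.h"

typedef char LZ4HC_state_size_check
    [ (sizeof(LZ4HC_CCtx_internal) <= sizeof(LZ4_streamHC_t)) ? 1 : -1 ];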