Merge branch 'dev' into lowAddr

test4973 2018-04-11 16:45:19 -07:00
commit b183066793
3 changed files with 120 additions and 66 deletions

lib/lz4.c

@@ -89,6 +89,7 @@
/*-************************************
* Dependency
**************************************/
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"
/* see also "memory routines" below */
@@ -572,12 +573,10 @@ LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, const void* tableBas
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
LZ4_FORCE_INLINE void LZ4_prepareTable(
LZ4_stream_t_internal* const cctx,
const int inputSize,
const tableType_t tableType,
const dict_directive dictDirective) {
const tableType_t tableType) {
/* If the table hasn't been used, it's guaranteed to be zeroed out, and is
* therefore safe to use no matter what mode we're in. Otherwise, we figure
* out if it's safe to leave as is or whether it needs to be reset.
@@ -598,17 +597,19 @@ LZ4_FORCE_INLINE void LZ4_prepareTable(
//if (tableType == byU32) cctx->currentOffset += 64 KB;
}
}
/* If the current offset is zero, we will never look in the external
* dictionary context, since there is no value a table entry can take that
* indicates a miss. In that case, we need to bump the offset to something
* non-zero.
/* Adding a gap, so all previous entries are > MAX_DISTANCE back, is faster
* than compressing without a gap. However, compressing with
* currentOffset == 0 is faster still, so we preserve that case.
*/
if (dictDirective == usingDictCtx &&
tableType != byPtr &&
cctx->currentOffset == 0)
{
cctx->currentOffset = 1;
if (cctx->currentOffset != 0 && tableType == byU32) {
cctx->currentOffset += 64 KB;
}
/* Finally, clear history */
cctx->dictCtx = NULL;
cctx->dictionary = NULL;
cctx->dictSize = 0;
}
/** LZ4_compress_generic() :
@@ -944,54 +945,47 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
}
/**
* LZ4_compress_fast_extState_noReset is a variant of LZ4_compress_fast_extState
* that can be used when the state is known to have already been initialized
* (via LZ4_resetStream or an earlier call to LZ4_compress_fast_extState /
* LZ4_compress_fast_extState_noReset). This can provide significantly better
* performance when the context reset would otherwise be a significant part of
* the cost of the compression, e.g., when the data to be compressed is small.
* LZ4_compress_fast_extState_fastReset() :
* A variant of LZ4_compress_fast_extState().
*
* Using this variant avoids an expensive initialization step. It is only safe
* to call if the state buffer is known to be correctly initialized already
* (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
* "correctly initialized").
*/
int LZ4_compress_fast_extState_noReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
int LZ4_compress_fast_extState_fastReset(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
ctx->dictionary = NULL;
ctx->dictSize = 0;
ctx->dictCtx = NULL;
if (dstCapacity >= LZ4_compressBound(srcSize)) {
if (srcSize < LZ4_64Klimit) {
const tableType_t tableType = byU16;
LZ4_prepareTable(ctx, srcSize, tableType, noDict);
LZ4_prepareTable(ctx, inputSize, tableType);
if (ctx->currentOffset) {
return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, dictSmall, acceleration);
} else {
return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
}
} else {
const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
LZ4_prepareTable(ctx, srcSize, tableType, noDict);
if (ctx->currentOffset) {
ctx->currentOffset += 64 KB;
}
return LZ4_compress_generic(ctx, src, dst, srcSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
const tableType_t tableType = (sizeof(void*)==8) ? byU32 : byPtr;
LZ4_prepareTable(ctx, inputSize, tableType);
return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
}
} else {
if (srcSize < LZ4_64Klimit) {
const tableType_t tableType = byU16;
LZ4_prepareTable(ctx, srcSize, tableType, noDict);
LZ4_prepareTable(ctx, inputSize, tableType);
if (ctx->currentOffset) {
return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
} else {
return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
}
} else {
const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32;
LZ4_prepareTable(ctx, srcSize, tableType, noDict);
if (ctx->currentOffset) {
ctx->currentOffset += 64 KB;
}
return LZ4_compress_generic(ctx, src, dst, srcSize, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
const tableType_t tableType = (sizeof(void*)==8) ? byU32 : byPtr;
LZ4_prepareTable(ctx, inputSize, tableType);
return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
}
}
}
@@ -1250,7 +1244,10 @@ void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
DEBUGLOG(5, "LZ4_resetStream %p", LZ4_stream);
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
LZ4_stream->internal_donotuse.tableType = clearedTable;
}
void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
@@ -1266,37 +1263,50 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
const tableType_t tableType = byU32;
const BYTE* p = (const BYTE*)dictionary;
const BYTE* const dictEnd = p + dictSize;
const BYTE* base;
DEBUGLOG(4, "LZ4_loadDict %p", LZ4_dict);
if ((dict->initCheck)
|| (dict->tableType != byU32 && dict->tableType != clearedTable)
|| (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */
LZ4_resetStream(LZ4_dict);
LZ4_prepareTable(dict, 0, tableType);
if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
dict->currentOffset += 64 KB;
base = p - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
dict->currentOffset += dict->dictSize;
dict->tableType = byU32;
dict->tableType = tableType;
if (dictSize < (int)HASH_UNIT) {
return 0;
}
while (p <= dictEnd-HASH_UNIT) {
LZ4_putPosition(p, dict->hashTable, byU32, base);
LZ4_putPosition(p, dict->hashTable, tableType, base);
p+=3;
}
return dict->dictSize;
}
void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dictionary_stream) {
if (dictionary_stream != NULL) {
/* If the current offset is zero, we will never look in the
* external dictionary context, since there is no value a table
* entry can take that indicates a miss. In that case, we need
* to bump the offset to something non-zero.
*/
if (working_stream->internal_donotuse.currentOffset == 0) {
working_stream->internal_donotuse.currentOffset = 64 KB;
}
working_stream->internal_donotuse.dictCtx = &(dictionary_stream->internal_donotuse);
} else {
working_stream->internal_donotuse.dictCtx = NULL;
}
}
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
{
@@ -1342,7 +1352,6 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
/* prefix mode : source data follows dictionary */
if (dictEnd == (const BYTE*)source) {
LZ4_prepareTable(streamPtr, inputSize, tableType, withPrefix64k);
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
return LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
else
@@ -1366,11 +1375,9 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch
memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
} else {
LZ4_prepareTable(streamPtr, inputSize, tableType, usingDictCtx);
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
}
} else { /* no dictCtx */
LZ4_prepareTable(streamPtr, inputSize, tableType, usingExtDict);
} else {
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
} else {

lib/lz4.h

@@ -353,18 +353,69 @@ LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int or
* Their signatures may change.
**************************************/
/*!
LZ4_compress_fast_extState_noReset() :
A variant of LZ4_compress_fast_extState().
#ifdef LZ4_STATIC_LINKING_ONLY
Use the _noReset variant if LZ4_resetStream() was called on the state
buffer before being used for the first time (calls to both extState
functions leave the state in a safe state, so zeroing is not required
between calls). Otherwise, using the plain _extState requires LZ4 to
reinitialize the state internally for every call.
*/
LZ4LIB_API int LZ4_compress_fast_extState_noReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
/*! LZ4_resetStream_fast() :
* When an LZ4_stream_t is known to be in an internally coherent state,
* it can often be prepared for a new compression with almost no work, only
* sometimes falling back to the full, expensive reset that is always required
* when the stream is in an indeterminate state (i.e., the reset performed by
* LZ4_resetStream()).
*
* LZ4_streams are guaranteed to be in a valid state when:
* - returned from LZ4_createStream()
* - reset by LZ4_resetStream()
* - memset(stream, 0, sizeof(LZ4_stream_t))
* - the stream was in a valid state and was reset by LZ4_resetStream_fast()
* - the stream was in a valid state and was then used in any compression call
* that returned success
* - the stream was in an indeterminate state and was used in a compression
* call that fully reset the state (LZ4_compress_fast_extState()) and that
* returned success
*/
LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
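
A usage sketch, not part of this diff: one stream reused across many independent blocks, with the cheap reset between them. It assumes static linking (the declarations above sit behind LZ4_STATIC_LINKING_ONLY); compress_blocks(), the acceleration value, and the error handling are illustrative, not from this patch.

#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"

/* Compress nbBlocks unrelated inputs with a single reusable stream.
 * Output handling is omitted for brevity: each block simply overwrites dst. */
int compress_blocks(const char* const blocks[], const int sizes[], int nbBlocks,
                    char* dst, int dstCapacity)
{
    LZ4_stream_t* const stream = LZ4_createStream();   /* created streams start in a valid state */
    int i;
    if (stream == NULL) return -1;
    for (i = 0; i < nbBlocks; i++) {
        LZ4_resetStream_fast(stream);   /* cheap reset instead of a full LZ4_resetStream() */
        if (LZ4_compress_fast_continue(stream, blocks[i], dst, sizes[i],
                                       dstCapacity, 1) <= 0) {
            LZ4_freeStream(stream);
            return -1;
        }
    }
    LZ4_freeStream(stream);
    return 0;
}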
/*! LZ4_compress_fast_extState_fastReset() :
* A variant of LZ4_compress_fast_extState().
*
* Using this variant avoids an expensive initialization step. It is only safe
* to call if the state buffer is known to be correctly initialized already
* (see above comment on LZ4_resetStream_fast() for a definition of "correctly
* initialized"). From a high level, the difference is that this function
* initializes the provided state with a call to LZ4_resetStream_fast() while
* LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
*/
LZ4LIB_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
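
Another sketch, not from the patch: caller-owned state reused across calls without a per-call full reset. The zero-initialized static object and compress_one() are illustrative names.

#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"

static LZ4_stream_t g_state;   /* static storage is zero-filled, i.e. "correctly initialized" */

int compress_one(const char* src, int srcSize, char* dst, int dstCapacity)
{
    /* Every successful call leaves g_state in a valid state, so only the
     * cheap internal reset runs here; no LZ4_resetStream() per call. */
    return LZ4_compress_fast_extState_fastReset(&g_state, src, dst,
                                                srcSize, dstCapacity, 1);
}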
/*! LZ4_attach_dictionary() :
* This is an experimental API that allows for the efficient use of a
* static dictionary many times.
*
* Rather than re-loading the dictionary buffer into a working context before
* each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
* working LZ4_stream_t, this function introduces a no-copy setup mechanism,
* in which the working stream references the dictionary stream in-place.
*
* Several assumptions are made about the state of the dictionary stream.
* Currently, only streams which have been prepared by LZ4_loadDict() should
* be expected to work.
*
* Alternatively, the provided dictionary stream pointer may be NULL, in which
* case any existing dictionary stream is unset.
*
* If a dictionary is provided, it replaces any pre-existing stream history.
* The dictionary contents are the only history that can be referenced and
* logically immediately precede the data compressed in the first subsequent
* compression call.
*
* The dictionary will only remain attached to the working stream through the
* first compression call, at the end of which it is cleared. The dictionary
* stream (and source buffer) must remain in-place / accessible / unchanged
* through the completion of the first compression call on the stream.
*/
LZ4LIB_API void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dictionary_stream);
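
A sketch of the attach flow described above, not part of this diff: the dictionary is loaded once into its own stream, then referenced in-place by a working stream before a block is compressed. compress_with_shared_dict() and its error handling are illustrative.

#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"

int compress_with_shared_dict(const char* dictBuf, int dictSize,
                              const char* src, int srcSize,
                              char* dst, int dstCapacity)
{
    LZ4_stream_t* const dictStream = LZ4_createStream();
    LZ4_stream_t* const workStream = LZ4_createStream();
    int cSize = -1;
    if (dictStream != NULL && workStream != NULL) {
        LZ4_loadDict(dictStream, dictBuf, dictSize);    /* prepare the dictionary once */
        LZ4_resetStream_fast(workStream);               /* cheap reset of the working stream */
        LZ4_attach_dictionary(workStream, dictStream);  /* no-copy, in-place reference */
        /* The attachment only covers this first call; re-attach before the next block. */
        cSize = LZ4_compress_fast_continue(workStream, src, dst, srcSize, dstCapacity, 1);
    }
    if (dictStream != NULL) LZ4_freeStream(dictStream);
    if (workStream != NULL) LZ4_freeStream(workStream);
    return cSize;
}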
#endif
/*-************************************
* Private definitions

lib/lz4frame.c

@@ -74,6 +74,7 @@ You can contact the author at :
* Includes
**************************************/
#include "lz4frame_static.h"
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"
#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"
@@ -530,13 +531,8 @@ static void LZ4F_applyCDict(void* ctx,
const LZ4F_CDict* cdict,
int level) {
if (level < LZ4HC_CLEVEL_MIN) {
LZ4_stream_t_internal* internal_ctx = &((LZ4_stream_t *)ctx)->internal_donotuse;
assert(!internal_ctx->initCheck);
/* Clear any local dictionary */
internal_ctx->dictionary = NULL;
internal_ctx->dictSize = 0;
/* Point to the dictionary context */
internal_ctx->dictCtx = cdict ? &(cdict->fastCtx->internal_donotuse) : NULL;
LZ4_resetStream_fast((LZ4_stream_t *)ctx);
LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
} else {
if (cdict) {
memcpy(ctx, cdict->HCCtx, sizeof(*cdict->HCCtx));
@@ -716,7 +712,7 @@ static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize
if (cdict) {
return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
} else {
return LZ4_compress_fast_extState_noReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
}
}