/*
 * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "hist.h"
#include "zstd_opt.h"


#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
#define ZSTD_MAX_PRICE      (1<<30)

#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */


/*-*************************************
*  Price functions for optimal parser
***************************************/
#if 0    /* approximation at bit level */
#  define BITCOST_ACCURACY 0
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt)  ((void)opt, ZSTD_bitWeight(stat))
#elif 0  /* fractional bit accuracy */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt)  ((void)opt, ZSTD_fracWeight(stat))
#else    /* opt==approx, ultra==accurate */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt)  (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif
MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}

MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
    U32 const stat = rawStat + 1;
    U32 const hb = ZSTD_highbit32(stat);
    U32 const BWeight = hb * BITCOST_MULTIPLIER;
    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
    U32 const weight = BWeight + FWeight;
    assert(hb + BITCOST_ACCURACY < 31);
    return weight;
}
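
/* Worked example :
 * ZSTD_fracWeight() returns hb + stat/2^hb in BITCOST_MULTIPLIER units,
 * a piecewise-linear approximation of 1 + log2(stat), exact at powers of 2.
 * e.g. rawStat=5 : stat=6, hb=2, BWeight=2*256=512, FWeight=(6<<8)>>2=384,
 * weight=896 == 3.5*BITCOST_MULTIPLIER (1 + log2(6) ~= 3.58).
 * Prices are computed as differences of weights, so the constant +1 cancels out. */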

#if (DEBUGLEVEL>=2)
/* debugging function,
 * @return price in bytes as fractional value
 * for debug messages only */
MEM_STATIC double ZSTD_fCost(U32 price)
{
    return (double)price / (BITCOST_MULTIPLIER*8);
}
#endif

static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
}

static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
    if (ZSTD_compressedLiterals(optPtr))
        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
}
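
/* Note : with the base prices above, a symbol's dynamic price is later computed
 * as basePrice - WEIGHT(freq), i.e. roughly log2(sum/freq) bits scaled by
 * BITCOST_MULTIPLIER, so rarer symbols get higher prices. */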

/* ZSTD_downscaleStat() :
 * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
 * return the resulting sum of elements */
static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
{
    U32 s, sum=0;
    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
    for (s=0; s<lastEltIndex+1; s++) {
        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
        sum += table[s];
    }
    return sum;
}
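
/* Worked example (with ZSTD_FREQ_DIV==4, malus==0) :
 * an element of 100 becomes 1 + (100>>4) = 7, and an element of 0 becomes 1,
 * so every symbol keeps a non-zero frequency and a finite price
 * when the table is re-used as seed for the next block. */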

/* ZSTD_rescaleFreqs() :
 * if first block (detected by optPtr->litLengthSum == 0) : init statistics
 *    take hints from dictionary if there is one
 *    or init from zero, using src for literals stats, or flat 1 for match symbols
 * otherwise downscale existing stats, to be used as seed for next block.
 */
static void
ZSTD_rescaleFreqs(optState_t* const optPtr,
            const BYTE* const src, size_t const srcSize,
                  int const optLevel)
{
    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
    optPtr->priceType = zop_dynamic;

    if (optPtr->litLengthSum == 0) {  /* first block : init */
        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
            optPtr->priceType = zop_predef;
        }

        assert(optPtr->symbolCosts != NULL);
        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
            /* huffman table presumed generated by dictionary */
            optPtr->priceType = zop_dynamic;

            if (compressedLiterals) {
                unsigned lit;
                assert(optPtr->litFreq != NULL);
                optPtr->litSum = 0;
                for (lit=0; lit<=MaxLit; lit++) {
                    U32 const scaleLog = 11;   /* scale to 2K */
                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
                    assert(bitCost <= scaleLog);
                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litSum += optPtr->litFreq[lit];
            }   }

            {   unsigned ll;
                FSE_CState_t llstate;
                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
                optPtr->litLengthSum = 0;
                for (ll=0; ll<=MaxLL; ll++) {
                    U32 const scaleLog = 10;   /* scale to 1K */
                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
                    assert(bitCost < scaleLog);
                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
            }   }

            {   unsigned ml;
                FSE_CState_t mlstate;
                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
                optPtr->matchLengthSum = 0;
                for (ml=0; ml<=MaxML; ml++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
                    assert(bitCost < scaleLog);
                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
            }   }

            {   unsigned of;
                FSE_CState_t ofstate;
                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
                optPtr->offCodeSum = 0;
                for (of=0; of<=MaxOff; of++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
                    assert(bitCost < scaleLog);
                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
            }   }

        } else {  /* not a dictionary */

            assert(optPtr->litFreq != NULL);
            if (compressedLiterals) {
                unsigned lit = MaxLit;
                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
            }

            {   unsigned ll;
                for (ll=0; ll<=MaxLL; ll++)
                    optPtr->litLengthFreq[ll] = 1;
            }
            optPtr->litLengthSum = MaxLL+1;

            {   unsigned ml;
                for (ml=0; ml<=MaxML; ml++)
                    optPtr->matchLengthFreq[ml] = 1;
            }
            optPtr->matchLengthSum = MaxML+1;

            {   unsigned of;
                for (of=0; of<=MaxOff; of++)
                    optPtr->offCodeFreq[of] = 1;
            }
            optPtr->offCodeSum = MaxOff+1;

        }

    } else {   /* new block : re-use previous statistics, scaled down */

        if (compressedLiterals)
            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
    }

    ZSTD_setBasePrices(optPtr, optLevel);
}
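
/* Note : in the dictionary path above, a symbol coded with b bits is given the
 * pseudo-frequency 2^(scaleLog - b), with the resulting sums landing near 2^scaleLog,
 * so that basePrice - WEIGHT(freq) later recovers approximately b bits as its price. */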

/* ZSTD_rawLiteralsCost() :
 * price of literals (only) in specified segment (which length can be 0).
 * does not include price of literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
                                const optState_t* const optPtr,
                                int optLevel)
{
    if (litLength == 0) return 0;

    if (!ZSTD_compressedLiterals(optPtr))
        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */

    if (optPtr->priceType == zop_predef)
        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bit per literal - no statistic used */

    /* dynamic statistics */
    {   U32 price = litLength * optPtr->litSumBasePrice;
        U32 u;
        for (u=0; u < litLength; u++) {
            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
        }
        return price;
    }
}
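
/* Note : in the dynamic branch above, the segment price is the sum over literals
 * of ( litSumBasePrice - WEIGHT(litFreq[lit]) ), i.e. roughly the sum of
 * log2(litSum/litFreq[lit]), an estimate of the segment's entropy-coded size. */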

/* ZSTD_litLengthPrice() :
 * cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);

    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
             + optPtr->litLengthSumBasePrice
             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
    }
}

/* ZSTD_litLengthContribution() :
 * @return ( cost(litlength) - cost(0) )
 * this value can then be added to rawLiteralsCost()
 * to provide a cost which is directly comparable to a match ending at same position */
static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel);

    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER)
                               + (int)WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancel out */
                               - (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
#if 1
        return contribution;
#else
        return MAX(0, contribution);   /* sometimes better, sometimes not ... */
#endif
    }
}

/* ZSTD_getMatchPrice() :
 * Provides the cost of the match part (offset + matchLength) of a sequence
 * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
 * optLevel : when < 2, favors small offset for decompression speed (improved cache efficiency) */
FORCE_INLINE_TEMPLATE U32
ZSTD_getMatchPrice(U32 const offset,
                   U32 const matchLength,
             const optState_t* const optPtr,
                   int const optLevel)
{
    U32 price;
    U32 const offCode = ZSTD_highbit32(offset+1);
    U32 const mlBase = matchLength - MINMATCH;
    assert(matchLength >= MINMATCH);

    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);

    /* dynamic statistics */
    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
    if ((optLevel<2) /*static*/ && offCode >= 20)
        price += (offCode-19)*2 * BITCOST_MULTIPLIER;   /* handicap for long distance offsets, favor decompression speed */

    /* match Length */
    {   U32 const mlCode = ZSTD_MLcode(mlBase);
        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
    }

    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */

    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
    return price;
}
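
/* Note : offCode == ZSTD_highbit32(offset+1) is both the offset symbol and, in the
 * zstd format, its number of extra bits, hence the offCode*BITCOST_MULTIPLIER term;
 * the symbol itself is then priced from offCodeFreq like the other statistics. */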

/* ZSTD_updateStats() :
 * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
                             U32 litLength, const BYTE* literals,
                             U32 offsetCode, U32 matchLength)
{
    /* literals */
    if (ZSTD_compressedLiterals(optPtr)) {
        U32 u;
        for (u=0; u < litLength; u++)
            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
        optPtr->litSum += litLength * ZSTD_LITFREQ_ADD;
    }

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        optPtr->litLengthFreq[llCode]++;
        optPtr->litLengthSum++;
    }

    /* match offset code (0-2=>repCode; 3+=>offset+2) */
    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
        assert(offCode <= MaxOff);
        optPtr->offCodeFreq[offCode]++;
        optPtr->offCodeSum++;
    }

    /* match Length */
    {   U32 const mlBase = matchLength - MINMATCH;
        U32 const mlCode = ZSTD_MLcode(mlBase);
        optPtr->matchLengthFreq[mlCode]++;
        optPtr->matchLengthSum++;
    }
}
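
/* Note : literals are incremented by ZSTD_LITFREQ_ADD (2) rather than 1, so that
 * literal statistics adapt faster to new data than the length/offset statistics,
 * which are bumped by 1 per sequence (see the ZSTD_LITFREQ_ADD comment near the top of this file). */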

/* ZSTD_readMINMATCH() :
 * function safe only for comparisons
 * assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
    switch (length)
    {
    default :
    case 4 : return MEM_read32(memPtr);
    case 3 : if (MEM_isLittleEndian())
                return MEM_read32(memPtr)<<8;
             else
                return MEM_read32(memPtr)>>8;
    }
}
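
/* Note : for length==3, a full 32-bit word is read and then shifted by 8, so the result
 * keeps only the 3 bytes at memPtr (plus a zero byte) on either endianness;
 * comparing two such values is therefore equivalent to comparing their first 3 bytes. */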

/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
                                              U32* nextToUpdate3,
                                              const BYTE* const ip)
{
    U32* const hashTable3 = ms->hashTable3;
    U32 const hashLog3 = ms->hashLog3;
    const BYTE* const base = ms->window.base;
    U32 idx = *nextToUpdate3;
    U32 const target = (U32)(ip - base);
    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
    assert(hashLog3 > 0);

    while(idx < target) {
        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
        idx++;
    }

    *nextToUpdate3 = target;
    return hashTable3[hash3];
}

/*-*************************************
*  Binary Tree search
***************************************/

/** ZSTD_insertBt1() : add one or multiple positions to tree.
 *  ip : assumed <= iend-8 .
 * @return : nb of positions added */
static U32 ZSTD_insertBt1(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                U32 const mls, const int extDict)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32    matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 current = (U32)(ip-base);
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;
    U32 matchEndIdx = current+8+1;
    size_t bestLength = 8;
    U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);

    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = current;   /* Update Hash Table */

    assert(windowLow > 0);
    while (nbCompares-- && (matchIndex >= windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < current);

#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif

        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
match = base + matchIndex ;
matchLength + = ZSTD_count ( ip + matchLength , match + matchLength , iend ) ;
} else {
match = dictBase + matchIndex ;
matchLength + = ZSTD_count_2segments ( ip + matchLength , match + matchLength , iend , dictEnd , prefixStart ) ;
if ( matchIndex + matchLength > = dictLimit )
match = base + matchIndex ; /* to prepare for next usage of match[matchLength] */
}
if ( matchLength > bestLength ) {
bestLength = matchLength ;
if ( matchLength > matchEndIdx - matchIndex )
matchEndIdx = matchIndex + ( U32 ) matchLength ;
}
if ( ip + matchLength = = iend ) { /* equal : no way to know if inf or sup */
break ; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
}
if ( match [ matchLength ] < ip [ matchLength ] ) { /* necessarily within buffer */
/* match is smaller than current */
* smallerPtr = matchIndex ; /* update smaller idx */
commonLengthSmaller = matchLength ; /* all smaller will now have at least this guaranteed common length */
if ( matchIndex < = btLow ) { smallerPtr = & dummy32 ; break ; } /* beyond tree size, stop searching */
smallerPtr = nextPtr + 1 ; /* new "candidate" => larger than match, which was smaller than target */
matchIndex = nextPtr [ 1 ] ; /* new matchIndex, larger than previous and closer to current */
} else {
/* match is larger than current */
* largerPtr = matchIndex ;
commonLengthLarger = matchLength ;
if ( matchIndex < = btLow ) { largerPtr = & dummy32 ; break ; } /* beyond tree size, stop searching */
largerPtr = nextPtr ;
matchIndex = nextPtr [ 0 ] ;
} }
* smallerPtr = * largerPtr = 0 ;
2019-06-06 03:20:55 +00:00
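/* (added note) The value returned by the block below is the number of positions the
 * caller's update loop advances by (see ZSTD_updateTree_internal further down) :
 * it is at least 1, and becomes larger when a very long match was found,
 * so that positions inside long repetitive patterns are skipped
 * rather than inserted one by one. */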
{ U32 positions = 0 ;
if ( bestLength > 384 ) positions = MIN ( 192 , ( U32 ) ( bestLength - 384 ) ) ; /* speed optimization */
assert ( matchEndIdx > current + 8 ) ;
return MAX ( positions , matchEndIdx - ( current + 8 ) ) ;
}
2017-12-28 15:58:57 +00:00
}
FORCE_INLINE_TEMPLATE
2017-12-13 00:51:00 +00:00
void ZSTD_updateTree_internal (
2018-08-23 19:08:03 +00:00
ZSTD_matchState_t * ms ,
2017-12-28 15:58:57 +00:00
const BYTE * const ip , const BYTE * const iend ,
2018-06-13 21:10:02 +00:00
const U32 mls , const ZSTD_dictMode_e dictMode )
2017-12-28 15:58:57 +00:00
{
2018-02-24 00:48:18 +00:00
const BYTE * const base = ms - > window . base ;
2017-12-28 15:58:57 +00:00
U32 const target = ( U32 ) ( ip - base ) ;
2017-12-13 00:51:00 +00:00
U32 idx = ms - > nextToUpdate ;
2018-12-19 18:11:06 +00:00
DEBUGLOG ( 6 , " ZSTD_updateTree_internal, from %u to %u (dictMode:%u) " ,
2018-06-13 21:10:02 +00:00
idx , target , dictMode ) ;
2017-12-28 15:58:57 +00:00
2019-06-21 22:22:29 +00:00
while ( idx < target ) {
U32 const forward = ZSTD_insertBt1 ( ms , base + idx , iend , mls , dictMode = = ZSTD_extDict ) ;
assert ( idx < ( U32 ) ( idx + forward ) ) ;
idx + = forward ;
}
assert ( ( size_t ) ( ip - base ) < = ( size_t ) ( U32 ) ( - 1 ) ) ;
assert ( ( size_t ) ( iend - base ) < = ( size_t ) ( U32 ) ( - 1 ) ) ;
2017-12-13 00:51:00 +00:00
ms - > nextToUpdate = target ;
2017-12-28 15:58:57 +00:00
}
2018-08-23 19:08:03 +00:00
void ZSTD_updateTree ( ZSTD_matchState_t * ms , const BYTE * ip , const BYTE * iend ) {
2018-11-20 22:56:07 +00:00
ZSTD_updateTree_internal ( ms , ip , iend , ms - > cParams . minMatch , ZSTD_noDict ) ;
2017-12-28 15:58:57 +00:00
}
2017-11-16 23:02:28 +00:00
FORCE_INLINE_TEMPLATE
Fixed Btree update
ZSTD_updateTree() was expected to be followed by a Bt match finder, which would update zc->nextToUpdate.
With the new optimal match finder, that's no longer necessarily the case : a match might be found during the repcode or hash3 stages, and the search stops there once it reaches sufficient_len, without even entering the binary tree.
The previous policy was to update zc->nextToUpdate nonetheless, but the current position would not be inserted, creating "holes" in the btree, i.e. positions that would no longer be searched.
Now, when the current position is not inserted, zc->nextToUpdate is not updated, and ZSTD_updateTree() is expected to fill the tree later on.
The selected solution is that ZSTD_updateTree() takes care of properly setting zc->nextToUpdate,
so that it no longer depends on a future function to do this job.
It took time to get there, as the issue started with a memory sanitizer error.
The problem would have been easier to spot with a proper `assert()`,
so this patch adds a few of them.
Additionally, I discovered that `make test` does not enable `assert()` during CLI tests.
This patch enables them.
Unfortunately, these `assert()` triggered other (unrelated) bugs during CLI tests, mostly within zstdmt,
so this patch also fixes them :
- Changed packed structure for gcc memory access : memory sanitizer would complain that a read "might" reach an out-of-bounds position, on the grounds that the `union` is larger than the type accessed.
Now, to avoid this issue, each type is independent.
- ZSTD_CCtxParams_setParameter() : @return provides the value of the parameter, clamped/fixed appropriately.
- ZSTDMT : changed constant name to ZSTDMT_JOBSIZE_MIN
- ZSTDMT : multithreading is automatically disabled when srcSize <= ZSTDMT_JOBSIZE_MIN, since only one thread will be used in this case (saves memory and runtime).
- ZSTDMT : nbThreads is automatically clamped when setting the value.
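As an illustration of the new invariant (this sketch is added for clarity and is not part of the original sources ; the helper name ZSTD_catchUpTree_sketch is hypothetical), a match finder can simply let ZSTD_updateTree() catch up before searching, and rely on it to leave no "hole" :

MEM_STATIC void ZSTD_catchUpTree_sketch(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend)
{
    /* illustration only : ZSTD_updateTree() inserts every position in
     * [nextToUpdate, ip) and then sets nextToUpdate to (ip - base),
     * so the finder never has to maintain nextToUpdate itself */
    if (ip >= ms->window.base + ms->nextToUpdate)
        ZSTD_updateTree(ms, ip, iend);
    assert(ms->window.base + ms->nextToUpdate >= ip);
}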
2017-11-16 20:18:56 +00:00
U32 ZSTD_insertBtAndGetAllMatches (
2019-05-28 23:11:32 +00:00
ZSTD_match_t * matches , /* store result (found matches) in this table (presumed large enough) */
2018-08-23 19:11:20 +00:00
ZSTD_matchState_t * ms ,
2019-05-28 23:18:12 +00:00
U32 * nextToUpdate3 ,
2018-06-13 21:10:02 +00:00
const BYTE * const ip , const BYTE * const iLimit , const ZSTD_dictMode_e dictMode ,
2019-05-28 23:11:32 +00:00
const U32 rep [ ZSTD_REP_NUM ] ,
2018-10-30 01:07:07 +00:00
U32 const ll0 , /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
const U32 lengthToBeat ,
U32 const mls /* template */ )
2017-09-02 01:28:35 +00:00
{
2018-08-23 19:11:20 +00:00
const ZSTD_compressionParameters * const cParams = & ms - > cParams ;
2017-12-13 00:51:00 +00:00
U32 const sufficient_len = MIN ( cParams - > targetLength , ZSTD_OPT_NUM - 1 ) ;
2018-02-24 00:48:18 +00:00
const BYTE * const base = ms - > window . base ;
2017-11-16 20:18:56 +00:00
U32 const current = ( U32 ) ( ip - base ) ;
2017-12-13 00:51:00 +00:00
U32 const hashLog = cParams - > hashLog ;
2017-11-13 10:19:36 +00:00
U32 const minMatch = ( mls = = 3 ) ? 3 : 4 ;
2017-12-13 00:51:00 +00:00
U32 * const hashTable = ms - > hashTable ;
2017-11-16 20:18:56 +00:00
size_t const h = ZSTD_hashPtr ( ip , hashLog , mls ) ;
2017-09-02 01:28:35 +00:00
U32 matchIndex = hashTable [ h ] ;
2017-12-13 00:51:00 +00:00
U32 * const bt = ms - > chainTable ;
U32 const btLog = cParams - > chainLog - 1 ;
2017-11-16 20:18:56 +00:00
U32 const btMask = ( 1U < < btLog ) - 1 ;
2017-09-02 01:28:35 +00:00
size_t commonLengthSmaller = 0 , commonLengthLarger = 0 ;
2018-02-24 00:48:18 +00:00
const BYTE * const dictBase = ms - > window . dictBase ;
U32 const dictLimit = ms - > window . dictLimit ;
2017-09-02 01:28:35 +00:00
const BYTE * const dictEnd = dictBase + dictLimit ;
const BYTE * const prefixStart = base + dictLimit ;
2019-05-31 22:55:12 +00:00
U32 const btLow = ( btMask > = current ) ? 0 : current - btMask ;
2019-08-05 13:18:43 +00:00
U32 const windowLow = ZSTD_getLowestMatchIndex ( ms , current , cParams - > windowLog ) ;
2018-06-15 18:08:58 +00:00
U32 const matchLow = windowLow ? windowLow : 1 ;
2017-09-02 01:28:35 +00:00
U32 * smallerPtr = bt + 2 * ( current & btMask ) ;
U32 * largerPtr = bt + 2 * ( current & btMask ) + 1 ;
2017-11-19 21:39:12 +00:00
U32 matchEndIdx = current + 8 + 1 ; /* farthest referenced position of any match => detects repetitive patterns */
2017-09-02 01:28:35 +00:00
U32 dummy32 ; /* to be nullified at the end */
U32 mnum = 0 ;
2017-12-13 00:51:00 +00:00
U32 nbCompares = 1U < < cParams - > searchLog ;
2017-09-02 01:28:35 +00:00
2018-06-20 20:54:53 +00:00
const ZSTD_matchState_t * dms = dictMode = = ZSTD_dictMatchState ? ms - > dictMatchState : NULL ;
2018-08-27 23:37:04 +00:00
const ZSTD_compressionParameters * const dmsCParams =
dictMode = = ZSTD_dictMatchState ? & dms - > cParams : NULL ;
2018-06-14 17:23:17 +00:00
const BYTE * const dmsBase = dictMode = = ZSTD_dictMatchState ? dms - > window . base : NULL ;
const BYTE * const dmsEnd = dictMode = = ZSTD_dictMatchState ? dms - > window . nextSrc : NULL ;
U32 const dmsHighLimit = dictMode = = ZSTD_dictMatchState ? ( U32 ) ( dmsEnd - dmsBase ) : 0 ;
U32 const dmsLowLimit = dictMode = = ZSTD_dictMatchState ? dms - > window . lowLimit : 0 ;
U32 const dmsIndexDelta = dictMode = = ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0 ;
2018-09-13 18:13:08 +00:00
U32 const dmsHashLog = dictMode = = ZSTD_dictMatchState ? dmsCParams - > hashLog : hashLog ;
U32 const dmsBtLog = dictMode = = ZSTD_dictMatchState ? dmsCParams - > chainLog - 1 : btLog ;
2018-08-27 23:37:04 +00:00
U32 const dmsBtMask = dictMode = = ZSTD_dictMatchState ? ( 1U < < dmsBtLog ) - 1 : 0 ;
U32 const dmsBtLow = dictMode = = ZSTD_dictMatchState & & dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit ;
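/* (added note) dmsIndexDelta translates dictMatchState indices into the current index space :
 * the top of the dictionary window (dmsHighLimit) maps onto windowLow, so dictionary content
 * sits immediately below the prefix ; it is used as `dmsBase + repIndex - dmsIndexDelta` and
 * `dictMatchIndex + dmsIndexDelta` below */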
2018-06-14 17:23:17 +00:00
2017-11-19 20:58:04 +00:00
size_t bestLength = lengthToBeat - 1 ;
2018-05-17 23:13:53 +00:00
DEBUGLOG ( 8 , " ZSTD_insertBtAndGetAllMatches: current=%u " , current ) ;
2017-09-02 01:28:35 +00:00
2017-11-13 10:19:36 +00:00
/* check repCode */
2018-10-30 01:07:07 +00:00
assert ( ll0 < = 1 ) ; /* necessarily 1 or 0 */
2017-11-15 22:37:40 +00:00
{ U32 const lastR = ZSTD_REP_NUM + ll0 ;
2017-11-13 10:19:36 +00:00
U32 repCode ;
for ( repCode = ll0 ; repCode < lastR ; repCode + + ) {
2017-11-15 22:37:40 +00:00
U32 const repOffset = ( repCode = = ZSTD_REP_NUM ) ? ( rep [ 0 ] - 1 ) : rep [ repCode ] ;
U32 const repIndex = current - repOffset ;
U32 repLen = 0 ;
assert ( current > = dictLimit ) ;
2017-11-16 20:18:56 +00:00
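/* (added note) this single unsigned comparison implements `current > repIndex >= dictLimit` :
 * repOffset==0 underflows to a huge value and is discarded, and for repOffset >= 1
 * the test repOffset-1 < current-dictLimit is equivalent to repIndex >= dictLimit,
 * with repIndex = current - repOffset necessarily below current */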
if ( repOffset - 1 /* intentional overflow, discards 0 and -1 */ < current - dictLimit ) { /* equivalent to `current > repIndex >= dictLimit` */
if ( ZSTD_readMINMATCH ( ip , minMatch ) = = ZSTD_readMINMATCH ( ip - repOffset , minMatch ) ) {
2017-11-15 22:37:40 +00:00
repLen = ( U32 ) ZSTD_count ( ip + minMatch , ip + minMatch - repOffset , iLimit ) + minMatch ;
}
2017-11-16 20:18:56 +00:00
} else { /* repIndex < dictLimit || repIndex >= current */
2018-06-14 19:53:46 +00:00
const BYTE * const repMatch = dictMode = = ZSTD_dictMatchState ?
dmsBase + repIndex - dmsIndexDelta :
dictBase + repIndex ;
2017-11-16 20:18:56 +00:00
assert ( current > = windowLow ) ;
2018-06-15 00:59:29 +00:00
if ( dictMode = = ZSTD_extDict
2017-11-16 20:18:56 +00:00
& & ( ( ( repOffset - 1 ) /*intentional overflow*/ < current - windowLow ) /* equivalent to `current > repIndex >= windowLow` */
& ( ( ( U32 ) ( ( dictLimit - 1 ) - repIndex ) > = 3 ) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ )
2017-11-15 22:37:40 +00:00
& & ( ZSTD_readMINMATCH ( ip , minMatch ) = = ZSTD_readMINMATCH ( repMatch , minMatch ) ) ) {
repLen = ( U32 ) ZSTD_count_2segments ( ip + minMatch , repMatch + minMatch , iLimit , dictEnd , prefixStart ) + minMatch ;
2018-06-14 17:23:17 +00:00
}
if ( dictMode = = ZSTD_dictMatchState
2018-06-21 19:25:32 +00:00
& & ( ( ( repOffset - 1 ) /*intentional overflow*/ < current - ( dmsLowLimit + dmsIndexDelta ) ) /* equivalent to `current > repIndex >= dmsLowLimit` */
& ( ( U32 ) ( ( dictLimit - 1 ) - repIndex ) > = 3 ) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
2018-06-14 19:53:46 +00:00
& & ( ZSTD_readMINMATCH ( ip , minMatch ) = = ZSTD_readMINMATCH ( repMatch , minMatch ) ) ) {
repLen = ( U32 ) ZSTD_count_2segments ( ip + minMatch , repMatch + minMatch , iLimit , dmsEnd , prefixStart ) + minMatch ;
} }
2017-11-15 22:37:40 +00:00
/* save longer solution */
if ( repLen > bestLength ) {
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 8 , " found repCode %u (ll0:%u, offset:%u) of length %u " ,
repCode , ll0 , repOffset , repLen ) ;
2017-11-15 22:37:40 +00:00
bestLength = repLen ;
matches [ mnum ] . off = repCode - ll0 ;
matches [ mnum ] . len = ( U32 ) repLen ;
mnum + + ;
2017-11-19 20:58:04 +00:00
if ( ( repLen > sufficient_len )
2017-11-15 22:37:40 +00:00
| ( ip + repLen = = iLimit ) ) { /* best possible */
return mnum ;
} } } }
2017-11-13 10:19:36 +00:00
2017-11-15 02:08:17 +00:00
/* HC3 match finder */
if ( ( mls = = 3 ) /*static*/ & & ( bestLength < mls ) ) {
2019-05-28 23:18:12 +00:00
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 ( ms , nextToUpdate3 , ip ) ;
2018-06-20 19:51:14 +00:00
if ( ( matchIndex3 > = matchLow )
2017-11-15 02:08:17 +00:00
& ( current - matchIndex3 < ( 1 < < 18 ) ) /*heuristic : longer distance likely too expensive*/ ) {
size_t mlen ;
2018-06-15 00:59:29 +00:00
if ( ( dictMode = = ZSTD_noDict ) /*static*/ | | ( dictMode = = ZSTD_dictMatchState ) /*static*/ | | ( matchIndex3 > = dictLimit ) ) {
2017-11-15 02:08:17 +00:00
const BYTE * const match = base + matchIndex3 ;
mlen = ZSTD_count ( ip , match , iLimit ) ;
2017-09-02 01:28:35 +00:00
} else {
2017-11-15 02:08:17 +00:00
const BYTE * const match = dictBase + matchIndex3 ;
mlen = ZSTD_count_2segments ( ip , match , iLimit , dictEnd , prefixStart ) ;
2017-09-02 01:28:35 +00:00
}
/* save best solution */
2017-11-15 02:08:17 +00:00
if ( mlen > = mls /* == 3 > bestLength */ ) {
2017-11-16 20:18:56 +00:00
DEBUGLOG ( 8 , " found small match with hlog3, of length %u " ,
( U32 ) mlen ) ;
2017-11-15 02:08:17 +00:00
bestLength = mlen ;
assert ( current > matchIndex3 ) ;
assert ( mnum = = 0 ) ; /* no prior solution */
matches [ 0 ] . off = ( current - matchIndex3 ) + ZSTD_REP_MOVE ;
matches [ 0 ] . len = ( U32 ) mlen ;
2017-11-15 19:29:24 +00:00
mnum = 1 ;
2017-11-19 21:39:12 +00:00
if ( ( mlen > sufficient_len ) |
( ip + mlen = = iLimit ) ) { /* best possible length */
2017-12-13 00:51:00 +00:00
ms - > nextToUpdate = current + 1 ; /* skip insertion */
2017-11-15 19:29:24 +00:00
return 1 ;
2019-05-28 22:26:52 +00:00
} } }
2018-06-20 02:18:08 +00:00
/* no dictMatchState lookup: dicts don't have a populated HC3 table */
2018-06-14 18:53:04 +00:00
}
2017-09-02 01:28:35 +00:00
hashTable [ h ] = current ; /* Update Hash Table */
2018-06-15 18:08:58 +00:00
while ( nbCompares - - & & ( matchIndex > = matchLow ) ) {
2017-11-15 02:08:17 +00:00
U32 * const nextPtr = bt + 2 * ( matchIndex & btMask ) ;
2017-09-02 01:28:35 +00:00
const BYTE * match ;
2019-08-02 12:42:53 +00:00
size_t matchLength = MIN ( commonLengthSmaller , commonLengthLarger ) ; /* guaranteed minimum nb of common bytes */
2017-11-15 02:08:17 +00:00
assert ( current > matchIndex ) ;
2017-11-15 21:44:24 +00:00
2018-06-14 19:53:46 +00:00
if ( ( dictMode = = ZSTD_noDict ) | | ( dictMode = = ZSTD_dictMatchState ) | | ( matchIndex + matchLength > = dictLimit ) ) {
2017-11-15 21:44:24 +00:00
assert ( matchIndex + matchLength > = dictLimit ) ; /* ensure the condition is correct when !extDict */
2017-09-02 01:28:35 +00:00
match = base + matchIndex ;
2019-08-02 12:42:53 +00:00
if ( matchIndex > = dictLimit ) assert ( memcmp ( match , ip , matchLength ) = = 0 ) ; /* ensure early section of match is equal as expected */
2017-11-19 22:40:21 +00:00
matchLength + = ZSTD_count ( ip + matchLength , match + matchLength , iLimit ) ;
2017-09-02 01:28:35 +00:00
} else {
match = dictBase + matchIndex ;
2019-08-02 12:42:53 +00:00
assert ( memcmp ( match , ip , matchLength ) = = 0 ) ; /* ensure early section of match is equal as expected */
2017-09-02 01:28:35 +00:00
matchLength + = ZSTD_count_2segments ( ip + matchLength , match + matchLength , iLimit , dictEnd , prefixStart ) ;
if ( matchIndex + matchLength > = dictLimit )
2019-08-02 12:42:53 +00:00
match = base + matchIndex ; /* prepare for match[matchLength] read */
2017-09-02 01:28:35 +00:00
}
if ( matchLength > bestLength ) {
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 8 , " found match of length %u at distance %u (offCode=%u) " ,
( U32 ) matchLength , current - matchIndex , current - matchIndex + ZSTD_REP_MOVE ) ;
2017-11-15 02:08:17 +00:00
assert ( matchEndIdx > matchIndex ) ;
if ( matchLength > matchEndIdx - matchIndex )
matchEndIdx = matchIndex + ( U32 ) matchLength ;
2017-09-02 01:28:35 +00:00
bestLength = matchLength ;
2017-11-15 02:08:17 +00:00
matches [ mnum ] . off = ( current - matchIndex ) + ZSTD_REP_MOVE ;
2017-09-02 01:28:35 +00:00
matches [ mnum ] . len = ( U32 ) matchLength ;
mnum + + ;
2018-06-14 20:59:49 +00:00
if ( ( matchLength > ZSTD_OPT_NUM )
| ( ip + matchLength = = iLimit ) /* equal : no way to know if inf or sup */ ) {
2018-06-14 19:54:03 +00:00
if ( dictMode = = ZSTD_dictMatchState ) nbCompares = 0 ; /* break should also skip searching dms */
break ; /* drop, to preserve bt consistency (miss a little bit of compression) */
2017-11-15 02:08:17 +00:00
}
2017-09-02 01:28:35 +00:00
}
if ( match [ matchLength ] < ip [ matchLength ] ) {
2017-11-15 21:44:24 +00:00
/* match smaller than current */
2017-09-02 01:28:35 +00:00
* smallerPtr = matchIndex ; /* update smaller idx */
commonLengthSmaller = matchLength ; /* all smaller will now have at least this guaranteed common length */
if ( matchIndex < = btLow ) { smallerPtr = & dummy32 ; break ; } /* beyond tree size, stop the search */
2017-11-15 21:44:24 +00:00
smallerPtr = nextPtr + 1 ; /* new candidate => larger than match, which was smaller than current */
matchIndex = nextPtr [ 1 ] ; /* new matchIndex, larger than previous, closer to current */
2017-09-02 01:28:35 +00:00
} else {
* largerPtr = matchIndex ;
commonLengthLarger = matchLength ;
if ( matchIndex < = btLow ) { largerPtr = & dummy32 ; break ; } /* beyond tree size, stop the search */
largerPtr = nextPtr ;
matchIndex = nextPtr [ 0 ] ;
} }
* smallerPtr = * largerPtr = 0 ;
2018-06-14 19:54:03 +00:00
if ( dictMode = = ZSTD_dictMatchState & & nbCompares ) {
2018-08-27 23:37:04 +00:00
size_t const dmsH = ZSTD_hashPtr ( ip , dmsHashLog , mls ) ;
U32 dictMatchIndex = dms - > hashTable [ dmsH ] ;
2018-06-14 19:54:03 +00:00
const U32 * const dmsBt = dms - > chainTable ;
2018-06-20 19:27:23 +00:00
commonLengthSmaller = commonLengthLarger = 0 ;
2018-06-14 19:54:03 +00:00
while ( nbCompares - - & & ( dictMatchIndex > dmsLowLimit ) ) {
2018-08-27 23:37:04 +00:00
const U32 * const nextPtr = dmsBt + 2 * ( dictMatchIndex & dmsBtMask ) ;
2018-06-14 19:54:03 +00:00
size_t matchLength = MIN ( commonLengthSmaller , commonLengthLarger ) ; /* guaranteed minimum nb of common bytes */
const BYTE * match = dmsBase + dictMatchIndex ;
matchLength + = ZSTD_count_2segments ( ip + matchLength , match + matchLength , iLimit , dmsEnd , prefixStart ) ;
if ( dictMatchIndex + matchLength > = dmsHighLimit )
match = base + dictMatchIndex + dmsIndexDelta ; /* to prepare for next usage of match[matchLength] */
if ( matchLength > bestLength ) {
matchIndex = dictMatchIndex + dmsIndexDelta ;
2018-06-21 19:25:44 +00:00
DEBUGLOG ( 8 , " found dms match of length %u at distance %u (offCode=%u) " ,
( U32 ) matchLength , current - matchIndex , current - matchIndex + ZSTD_REP_MOVE ) ;
2018-06-14 19:54:03 +00:00
if ( matchLength > matchEndIdx - matchIndex )
matchEndIdx = matchIndex + ( U32 ) matchLength ;
bestLength = matchLength ;
matches [ mnum ] . off = ( current - matchIndex ) + ZSTD_REP_MOVE ;
matches [ mnum ] . len = ( U32 ) matchLength ;
mnum + + ;
2018-06-14 20:59:49 +00:00
if ( ( matchLength > ZSTD_OPT_NUM )
| ( ip + matchLength = = iLimit ) /* equal : no way to know if inf or sup */ ) {
2018-06-14 19:54:03 +00:00
break ; /* drop, to guarantee consistency (miss a little bit of compression) */
}
}
2018-06-15 00:59:29 +00:00
if ( dictMatchIndex < = dmsBtLow ) { break ; } /* beyond tree size, stop the search */
2018-06-14 19:54:03 +00:00
if ( match [ matchLength ] < ip [ matchLength ] ) {
commonLengthSmaller = matchLength ; /* all smaller will now have at least this guaranteed common length */
dictMatchIndex = nextPtr [ 1 ] ; /* new matchIndex larger than previous (closer to current) */
} else {
/* match is larger than current */
commonLengthLarger = matchLength ;
dictMatchIndex = nextPtr [ 0 ] ;
}
}
}
2017-11-19 21:39:12 +00:00
assert ( matchEndIdx > current + 8 ) ;
2017-12-13 00:51:00 +00:00
ms - > nextToUpdate = matchEndIdx - 8 ; /* skip repetitive patterns */
2017-09-02 01:28:35 +00:00
return mnum ;
}
2017-11-15 21:44:24 +00:00
FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
2019-05-28 23:11:32 +00:00
ZSTD_match_t * matches , /* store result (match found, increasing size) in this table */
2018-08-23 19:11:20 +00:00
ZSTD_matchState_t * ms ,
2019-05-28 23:18:12 +00:00
U32 * nextToUpdate3 ,
2018-06-13 21:10:02 +00:00
const BYTE * ip , const BYTE * const iHighLimit , const ZSTD_dictMode_e dictMode ,
2019-05-28 23:11:32 +00:00
const U32 rep [ ZSTD_REP_NUM ] ,
U32 const ll0 ,
U32 const lengthToBeat )
2017-09-02 01:28:35 +00:00
{
2018-08-23 19:11:20 +00:00
const ZSTD_compressionParameters * const cParams = & ms - > cParams ;
2018-11-20 22:56:07 +00:00
U32 const matchLengthSearch = cParams - > minMatch ;
2018-05-09 22:46:11 +00:00
DEBUGLOG ( 8 , " ZSTD_BtGetAllMatches " ) ;
2018-02-24 00:48:18 +00:00
if ( ip < ms - > window . base + ms - > nextToUpdate ) return 0 ; /* skipped area */
2018-08-23 19:08:03 +00:00
ZSTD_updateTree_internal ( ms , ip , iHighLimit , matchLengthSearch , dictMode ) ;
2017-09-02 01:28:35 +00:00
switch ( matchLengthSearch )
{
2019-05-28 23:18:12 +00:00
case 3 : return ZSTD_insertBtAndGetAllMatches ( matches , ms , nextToUpdate3 , ip , iHighLimit , dictMode , rep , ll0 , lengthToBeat , 3 ) ;
2017-09-02 01:28:35 +00:00
default :
2019-05-28 23:18:12 +00:00
case 4 : return ZSTD_insertBtAndGetAllMatches ( matches , ms , nextToUpdate3 , ip , iHighLimit , dictMode , rep , ll0 , lengthToBeat , 4 ) ;
case 5 : return ZSTD_insertBtAndGetAllMatches ( matches , ms , nextToUpdate3 , ip , iHighLimit , dictMode , rep , ll0 , lengthToBeat , 5 ) ;
2017-09-02 01:28:35 +00:00
case 7 :
2019-05-28 23:18:12 +00:00
case 6 : return ZSTD_insertBtAndGetAllMatches ( matches , ms , nextToUpdate3 , ip , iHighLimit , dictMode , rep , ll0 , lengthToBeat , 6 ) ;
2017-09-02 01:28:35 +00:00
}
}
/*-*******************************
* Optimal parser
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2017-11-13 10:19:36 +00:00
typedef struct repcodes_s {
U32 rep [ 3 ] ;
} repcodes_t ;
2018-09-27 22:13:43 +00:00
static repcodes_t ZSTD_updateRep ( U32 const rep [ 3 ] , U32 const offset , U32 const ll0 )
2017-11-13 10:19:36 +00:00
{
repcodes_t newReps ;
if ( offset > = ZSTD_REP_NUM ) { /* full offset */
newReps . rep [ 2 ] = rep [ 1 ] ;
newReps . rep [ 1 ] = rep [ 0 ] ;
newReps . rep [ 0 ] = offset - ZSTD_REP_MOVE ;
} else { /* repcode */
U32 const repCode = offset + ll0 ;
if ( repCode > 0 ) { /* note : if repCode==0, no change */
U32 const currentOffset = ( repCode = = ZSTD_REP_NUM ) ? ( rep [ 0 ] - 1 ) : rep [ repCode ] ;
newReps . rep [ 2 ] = ( repCode > = 2 ) ? rep [ 1 ] : rep [ 2 ] ;
newReps . rep [ 1 ] = rep [ 0 ] ;
newReps . rep [ 0 ] = currentOffset ;
} else { /* repCode == 0 */
memcpy ( & newReps , rep , sizeof ( newReps ) ) ;
}
}
return newReps ;
}
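As a small usage sketch (added for illustration ; the helper name and the numeric values are arbitrary, not taken from a real stream), here is how the repcode history rotates :

MEM_STATIC void ZSTD_updateRep_example(void)
{
    U32 const rep[3] = { 4, 8, 16 };
    /* a match encoded with a full offset (offset >= ZSTD_REP_NUM) pushes the new
     * distance to the front and shifts the history : result is { 100, 4, 8 } */
    repcodes_t const afterNewOffset = ZSTD_updateRep(rep, 100 + ZSTD_REP_MOVE, 0);
    /* a match encoded as repcode 1, with a non-zero literal length (ll0==0),
     * moves rep[1] to the front : result is { 8, 4, 16 } */
    repcodes_t const afterRep1 = ZSTD_updateRep(rep, 1 /*offset*/, 0 /*ll0*/);
    (void)afterNewOffset; (void)afterRep1;
}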
2018-05-29 21:07:25 +00:00
static U32 ZSTD_totalLen ( ZSTD_optimal_t sol )
zstd_opt: changed cost formula
There was a flaw in the formula
which compared literal cost with match cost :
at a given position,
a non-empty run of literals is going to be part of the next sequence,
while if the position ends a previous match and immediately starts another match,
the next sequence will have a litlength of zero.
A litlength of zero has a non-zero cost.
It follows that the literals' cost should be compared to the match cost plus the cost of litlength==0.
Not doing so gave a structural advantage to matches, which would be selected more often.
I believe that's what led to the creation of the strange heuristic which added a complex cost to matches.
The heuristic was actually compensating.
It was probably created through multiple trials, settling on the best outcome for a given scenario (I suspect silesia.tar).
The problem with this heuristic is that it's hard to understand,
and unfortunately, any future change in the parser would impact the way it should be calculated and its effects.
The "proper" formula makes it possible to remove this heuristic
(a short illustration of the corrected comparison follows ZSTD_totalLen() below).
Now, the problem is : in a head-to-head comparison, it's sometimes better, sometimes worse.
Note that all differences are small (< 0.01 ratio).
In general, the newer formula is better for smaller files (for example, calgary.tar and enwik7).
I suspect that's because starting statistics are pretty poor (another area of improvement).
However, for silesia.tar specifically, it's worse at level 22 (while being better at level 17, so even the compression level has an impact ...).
It's a pity that zstd -22 gets worse on silesia.tar.
That being said, I like that the new code gets rid of strange variables,
which were introducing complexity for any future evolution (faster variants being in mind).
Therefore, in spite of this detrimental side effect, I tend to be in favor of it.
2017-11-28 22:07:03 +00:00
{
2018-05-29 21:07:25 +00:00
return sol . litlen + sol . mlen ;
2017-11-28 22:07:03 +00:00
}
2018-12-18 20:32:58 +00:00
#if 0 /* debug */
2018-12-17 23:48:34 +00:00
static void
listStats ( const U32 * table , int lastEltID )
{
int const nbElts = lastEltID + 1 ;
int enb ;
for ( enb = 0 ; enb < nbElts ; enb + + ) {
( void ) table ;
//RAWLOG(2, "%3i:%3i, ", enb, table[enb]);
RAWLOG ( 2 , " %4i, " , table [ enb ] ) ;
}
RAWLOG ( 2 , " \n " ) ;
}
2018-12-18 20:32:58 +00:00
# endif
2018-05-08 19:32:16 +00:00
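/* ZSTD_compressBlock_opt_generic() :
 * core of the optimal parser :
 * for each position within the block, collect candidate matches,
 * estimate their price (cost in fractional bits) from current statistics,
 * keep the cheapest known way to reach each position (opt[] table),
 * then walk the best path backwards and emit the selected sequences.
 * @return : size of the last (unmatched) literals segment, in bytes */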
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_opt_generic ( ZSTD_matchState_t * ms ,
seqStore_t * seqStore ,
U32 rep [ ZSTD_REP_NUM ] ,
2018-12-19 18:11:06 +00:00
const void * src , size_t srcSize ,
const int optLevel ,
const ZSTD_dictMode_e dictMode )
2017-09-02 01:28:35 +00:00
{
2017-12-13 00:51:00 +00:00
optState_t * const optStatePtr = & ms - > opt ;
2017-09-02 01:28:35 +00:00
const BYTE * const istart = ( const BYTE * ) src ;
const BYTE * ip = istart ;
const BYTE * anchor = istart ;
const BYTE * const iend = istart + srcSize ;
const BYTE * const ilimit = iend - 8 ;
2018-02-24 00:48:18 +00:00
const BYTE * const base = ms - > window . base ;
const BYTE * const prefixStart = base + ms - > window . dictLimit ;
2018-08-23 18:53:34 +00:00
const ZSTD_compressionParameters * const cParams = & ms - > cParams ;
2017-09-02 01:28:35 +00:00
2017-12-13 00:51:00 +00:00
U32 const sufficient_len = MIN ( cParams - > targetLength , ZSTD_OPT_NUM - 1 ) ;
2018-11-20 22:56:07 +00:00
U32 const minMatch = ( cParams - > minMatch = = 3 ) ? 3 : 4 ;
2019-05-28 23:18:12 +00:00
U32 nextToUpdate3 = ms - > nextToUpdate ;
2017-09-02 01:28:35 +00:00
2017-11-08 19:05:32 +00:00
ZSTD_optimal_t * const opt = optStatePtr - > priceTable ;
ZSTD_match_t * const matches = optStatePtr - > matchTable ;
2018-05-29 21:07:25 +00:00
ZSTD_optimal_t lastSequence ;
2017-09-02 01:28:35 +00:00
/* init */
2018-12-19 18:11:06 +00:00
DEBUGLOG ( 5 , " ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u " ,
( U32 ) ( ip - base ) , ms - > window . dictLimit , ms - > nextToUpdate ) ;
2018-05-16 21:53:35 +00:00
assert ( optLevel < = 2 ) ;
ZSTD_rescaleFreqs ( optStatePtr , ( const BYTE * ) src , srcSize , optLevel ) ;
2017-09-02 01:28:35 +00:00
ip + = ( ip = = prefixStart ) ;
/* Match Loop */
while ( ip < ilimit ) {
2017-11-09 20:46:34 +00:00
U32 cur , last_pos = 0 ;
2017-11-13 10:19:36 +00:00
/* find first match */
2017-11-15 02:08:17 +00:00
{ U32 const litlen = ( U32 ) ( ip - anchor ) ;
U32 const ll0 = ! litlen ;
2019-05-28 23:18:12 +00:00
U32 const nbMatches = ZSTD_BtGetAllMatches ( matches , ms , & nextToUpdate3 , ip , iend , dictMode , rep , ll0 , minMatch ) ;
2017-11-15 02:08:17 +00:00
if ( ! nbMatches ) { ip + + ; continue ; }
/* initialize opt[0] */
{ U32 i ; for ( i = 0 ; i < ZSTD_REP_NUM ; i + + ) opt [ 0 ] . rep [ i ] = rep [ i ] ; }
2018-05-29 21:07:25 +00:00
opt [ 0 ] . mlen = 0 ; /* means is_a_literal */
2017-11-15 02:08:17 +00:00
opt [ 0 ] . litlen = litlen ;
2020-03-05 00:29:19 +00:00
/* We don't need to include the actual price of the literals because
* it is static for the duration of the forward pass , and is included
* in every price . We include the literal length to avoid negative
* prices when we subtract the previous literal length .
*/
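/* informal illustration : later in the forward pass, a literal extension is priced as
 * opt[cur-1].price + ZSTD_rawLiteralsCost(1 byte)
 * + ZSTD_litLengthPrice(litlen) - ZSTD_litLengthPrice(litlen-1) ;
 * starting opt[0].price at ZSTD_litLengthPrice(litlen) makes these
 * litLength terms telescope, so intermediate prices never become negative. */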
2020-03-05 00:12:16 +00:00
opt [ 0 ] . price = ZSTD_litLengthPrice ( litlen , optStatePtr , optLevel ) ;
2017-11-15 02:08:17 +00:00
/* large match -> immediate encoding */
{ U32 const maxML = matches [ nbMatches - 1 ] . len ;
2018-05-08 19:32:16 +00:00
U32 const maxOffset = matches [ nbMatches - 1 ] . off ;
2019-04-12 18:18:11 +00:00
DEBUGLOG ( 6 , " found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series " ,
2018-05-08 19:32:16 +00:00
nbMatches , maxML , maxOffset , ( U32 ) ( ip - prefixStart ) ) ;
2017-11-15 02:08:17 +00:00
if ( maxML > sufficient_len ) {
2018-05-29 21:07:25 +00:00
lastSequence . litlen = litlen ;
lastSequence . mlen = maxML ;
lastSequence . off = maxOffset ;
DEBUGLOG ( 6 , " large match (%u>%u), immediate encoding " ,
maxML , sufficient_len ) ;
2017-11-15 02:08:17 +00:00
cur = 0 ;
2018-05-29 21:07:25 +00:00
last_pos = ZSTD_totalLen ( lastSequence ) ;
2017-11-15 02:08:17 +00:00
goto _shortestPath ;
} }
2017-09-02 01:28:35 +00:00
2017-11-15 02:08:17 +00:00
/* set prices for first matches starting position == 0 */
2018-05-29 21:07:25 +00:00
{ U32 const literalsPrice = opt [ 0 ] . price + ZSTD_litLengthPrice ( 0 , optStatePtr , optLevel ) ;
2017-11-28 20:14:46 +00:00
U32 pos ;
2017-11-13 10:19:36 +00:00
U32 matchNb ;
2018-05-08 19:32:16 +00:00
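/* positions 1..minMatch-1 cannot be reached by a match starting at position 0 ;
 * mark them as unreachable for now, they can only be filled later via the literal path */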
for ( pos = 1 ; pos < minMatch ; pos + + ) {
opt [ pos ] . price = ZSTD_MAX_PRICE ; /* mlen, litlen and price will be fixed during forward scanning */
2017-11-28 01:13:59 +00:00
}
2017-11-15 02:08:17 +00:00
for ( matchNb = 0 ; matchNb < nbMatches ; matchNb + + ) {
2017-11-13 10:19:36 +00:00
U32 const offset = matches [ matchNb ] . off ;
2017-11-09 20:46:34 +00:00
U32 const end = matches [ matchNb ] . len ;
2017-11-15 02:08:17 +00:00
repcodes_t const repHistory = ZSTD_updateRep ( rep , offset , ll0 ) ;
2017-11-28 01:13:59 +00:00
for ( ; pos < = end ; pos + + ) {
2018-05-08 19:32:16 +00:00
U32 const matchPrice = ZSTD_getMatchPrice ( offset , pos , optStatePtr , optLevel ) ;
U32 const sequencePrice = literalsPrice + matchPrice ;
2018-05-09 22:46:11 +00:00
DEBUGLOG ( 7 , " rPos:%u => set initial price : %.2f " ,
pos , ZSTD_fCost ( sequencePrice ) ) ;
2017-11-28 01:13:59 +00:00
opt [ pos ] . mlen = pos ;
opt [ pos ] . off = offset ;
opt [ pos ] . litlen = litlen ;
2018-05-08 19:32:16 +00:00
opt [ pos ] . price = sequencePrice ;
ZSTD_STATIC_ASSERT ( sizeof ( opt [ pos ] . rep ) = = sizeof ( repHistory ) ) ;
2017-11-28 01:13:59 +00:00
memcpy ( opt [ pos ] . rep , & repHistory , sizeof ( repHistory ) ) ;
} }
last_pos = pos - 1 ;
}
2017-11-15 02:08:17 +00:00
}
2017-09-02 01:28:35 +00:00
2017-11-09 20:46:34 +00:00
/* check further positions */
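/* invariant : opt[p] holds the cheapest known way (price, ending sequence, rep history)
 * to reach relative position p among the positions explored so far */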
2017-09-02 01:28:35 +00:00
for ( cur = 1 ; cur < = last_pos ; cur + + ) {
2017-11-09 20:46:34 +00:00
const BYTE * const inr = ip + cur ;
assert ( cur < ZSTD_OPT_NUM ) ;
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 7 , " cPos:%zi==rPos:%u " , inr - istart , cur )
2017-09-02 01:28:35 +00:00
2017-11-09 20:46:34 +00:00
/* Fix current position with one literal if cheaper */
2018-05-29 21:07:25 +00:00
{ U32 const litlen = ( opt [ cur - 1 ] . mlen = = 0 ) ? opt [ cur - 1 ] . litlen + 1 : 1 ;
int const price = opt [ cur - 1 ] . price
+ ZSTD_rawLiteralsCost ( ip + cur - 1 , 1 , optStatePtr , optLevel )
+ ZSTD_litLengthPrice ( litlen , optStatePtr , optLevel )
- ZSTD_litLengthPrice ( litlen - 1 , optStatePtr , optLevel ) ;
2017-11-28 22:07:03 +00:00
assert ( price < 1000000000 ) ; /* overflow check */
2017-11-13 10:19:36 +00:00
if ( price < = opt [ cur ] . price ) {
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 7 , " cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u) " ,
inr - istart , cur , ZSTD_fCost ( price ) , ZSTD_fCost ( opt [ cur ] . price ) , litlen ,
opt [ cur - 1 ] . rep [ 0 ] , opt [ cur - 1 ] . rep [ 1 ] , opt [ cur - 1 ] . rep [ 2 ] ) ;
opt [ cur ] . mlen = 0 ;
2017-11-28 20:14:46 +00:00
opt [ cur ] . off = 0 ;
opt [ cur ] . litlen = litlen ;
opt [ cur ] . price = price ;
memcpy ( opt [ cur ] . rep , opt [ cur - 1 ] . rep , sizeof ( opt [ cur ] . rep ) ) ;
2018-05-09 22:46:11 +00:00
} else {
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 7 , " cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u) " ,
inr - istart , cur , ZSTD_fCost ( price ) , ZSTD_fCost ( opt [ cur ] . price ) ,
opt [ cur ] . rep [ 0 ] , opt [ cur ] . rep [ 1 ] , opt [ cur ] . rep [ 2 ] ) ;
2018-05-09 22:46:11 +00:00
}
}
2017-11-09 20:46:34 +00:00
/* last match must start at a minimum distance of 8 from iend */
if ( inr > ilimit ) continue ;
2017-11-19 18:38:02 +00:00
if ( cur = = last_pos ) break ;
2018-05-08 19:32:16 +00:00
if ( ( optLevel = = 0 ) /*static_test*/
2018-05-09 22:46:11 +00:00
& & ( opt [ cur + 1 ] . price < = opt [ cur ] . price + ( BITCOST_MULTIPLIER / 2 ) ) ) {
DEBUGLOG ( 7 , " move to next rPos:%u : price is <= " , cur + 1 ) ;
2017-11-19 18:38:02 +00:00
continue ; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
2018-05-09 22:46:11 +00:00
}
2017-11-19 18:38:02 +00:00
2018-05-29 21:07:25 +00:00
{ U32 const ll0 = ( opt [ cur ] . mlen ! = 0 ) ;
U32 const litlen = ( opt [ cur ] . mlen = = 0 ) ? opt [ cur ] . litlen : 0 ;
U32 const previousPrice = opt [ cur ] . price ;
U32 const basePrice = previousPrice + ZSTD_litLengthPrice ( 0 , optStatePtr , optLevel ) ;
2019-05-28 23:18:12 +00:00
U32 const nbMatches = ZSTD_BtGetAllMatches ( matches , ms , & nextToUpdate3 , inr , iend , dictMode , opt [ cur ] . rep , ll0 , minMatch ) ;
2017-11-09 20:46:34 +00:00
U32 matchNb ;
2018-05-09 22:46:11 +00:00
if ( ! nbMatches ) {
DEBUGLOG ( 7 , " rPos:%u : no match found " , cur ) ;
continue ;
}
2017-11-15 02:08:17 +00:00
{ U32 const maxML = matches [ nbMatches - 1 ] . len ;
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 7 , " cPos:%zi==rPos:%u, found %u matches, of maxLength=%u " ,
inr - istart , cur , nbMatches , maxML ) ;
2017-11-15 02:08:17 +00:00
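/* a match long enough to exceed sufficient_len, or to overflow the opt[] table,
 * terminates the forward pass : it gets encoded immediately,
 * and parsing resumes right after it */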
if ( ( maxML > sufficient_len )
2018-05-08 19:32:16 +00:00
| | ( cur + maxML > = ZSTD_OPT_NUM ) ) {
2018-05-29 21:07:25 +00:00
lastSequence . mlen = maxML ;
lastSequence . off = matches [ nbMatches - 1 ] . off ;
lastSequence . litlen = litlen ;
cur - = ( opt [ cur ] . mlen = = 0 ) ? opt [ cur ] . litlen : 0 ; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
last_pos = cur + ZSTD_totalLen ( lastSequence ) ;
if ( cur > ZSTD_OPT_NUM ) cur = 0 ; /* underflow => first match */
2017-11-15 02:08:17 +00:00
goto _shortestPath ;
2018-05-08 19:32:16 +00:00
} }
2017-09-02 01:28:35 +00:00
2017-11-15 02:08:17 +00:00
/* set prices using matches found at position == cur */
for ( matchNb = 0 ; matchNb < nbMatches ; matchNb + + ) {
2017-11-13 10:19:36 +00:00
U32 const offset = matches [ matchNb ] . off ;
2017-11-15 02:08:17 +00:00
repcodes_t const repHistory = ZSTD_updateRep ( opt [ cur ] . rep , offset , ll0 ) ;
2017-11-19 18:21:21 +00:00
U32 const lastML = matches [ matchNb ] . len ;
U32 const startML = ( matchNb > 0 ) ? matches [ matchNb - 1 ] . len + 1 : minMatch ;
U32 mlen ;
2017-11-13 10:19:36 +00:00
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 7 , " testing match %u => offCode=%4u, mlen=%2u, llen=%2u " ,
2017-11-13 10:19:36 +00:00
matchNb , matches [ matchNb ] . off , lastML , litlen ) ;
2017-09-02 01:28:35 +00:00
2018-05-08 19:32:16 +00:00
for ( mlen = lastML ; mlen > = startML ; mlen - - ) { /* scan downward */
2017-11-15 02:08:17 +00:00
U32 const pos = cur + mlen ;
2017-11-28 22:07:03 +00:00
int const price = basePrice + ZSTD_getMatchPrice ( offset , mlen , optStatePtr , optLevel ) ;
2017-09-02 01:28:35 +00:00
2017-11-17 23:51:52 +00:00
if ( ( pos > last_pos ) | | ( price < opt [ pos ] . price ) ) {
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 7 , " rPos:%u (ml=%2u) => new better price (%.2f<%.2f) " ,
pos , mlen , ZSTD_fCost ( price ) , ZSTD_fCost ( opt [ pos ] . price ) ) ;
2018-05-08 19:32:16 +00:00
while ( last_pos < pos ) { opt [ last_pos + 1 ] . price = ZSTD_MAX_PRICE ; last_pos + + ; } /* fill empty positions */
2017-11-29 00:08:43 +00:00
opt [ pos ] . mlen = mlen ;
opt [ pos ] . off = offset ;
opt [ pos ] . litlen = litlen ;
opt [ pos ] . price = price ;
2018-05-08 19:32:16 +00:00
ZSTD_STATIC_ASSERT ( sizeof ( opt [ pos ] . rep ) = = sizeof ( repHistory ) ) ;
2017-11-29 00:08:43 +00:00
memcpy ( opt [ pos ] . rep , & repHistory , sizeof ( repHistory ) ) ;
2017-11-19 18:21:21 +00:00
} else {
2018-05-29 21:07:25 +00:00
DEBUGLOG ( 7 , " rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f) " ,
pos , mlen , ZSTD_fCost ( price ) , ZSTD_fCost ( opt [ pos ] . price ) ) ;
2018-05-08 19:32:16 +00:00
if ( optLevel = = 0 ) break ; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
2017-11-13 10:19:36 +00:00
}
2017-11-21 18:26:17 +00:00
} } }
} /* for (cur = 1; cur <= last_pos; cur++) */
2017-09-02 01:28:35 +00:00
2018-05-29 21:07:25 +00:00
lastSequence = opt [ last_pos ] ;
cur = last_pos > ZSTD_totalLen ( lastSequence ) ? last_pos - ZSTD_totalLen ( lastSequence ) : 0 ; /* single sequence, and it starts before `ip` */
assert ( cur < ZSTD_OPT_NUM ) ; /* control overflow*/
2017-09-02 01:28:35 +00:00
2017-11-09 20:46:34 +00:00
_shortestPath : /* cur, last_pos, lastSequence have to be set */
2018-05-29 21:07:25 +00:00
assert ( opt [ 0 ] . mlen = = 0 ) ;
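/* reverse traversal : walk the selected path backwards from position cur,
 * copying each sequence into the tail of opt[] (slots storeStart..storeEnd),
 * so that sequences can then be written into seqStore in forward order */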
{ U32 const storeEnd = cur + 1 ;
U32 storeStart = storeEnd ;
U32 seqPos = cur ;
DEBUGLOG ( 6 , " start reverse traversal (last_pos:%u, cur:%u) " ,
2018-08-14 19:56:21 +00:00
last_pos , cur ) ; ( void ) last_pos ;
2018-05-29 21:07:25 +00:00
assert ( storeEnd < ZSTD_OPT_NUM ) ;
DEBUGLOG ( 6 , " last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u) " ,
storeEnd , lastSequence . litlen , lastSequence . mlen , lastSequence . off ) ;
opt [ storeEnd ] = lastSequence ;
while ( seqPos > 0 ) {
U32 const backDist = ZSTD_totalLen ( opt [ seqPos ] ) ;
storeStart - - ;
DEBUGLOG ( 6 , " sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u) " ,
seqPos , storeStart , opt [ seqPos ] . litlen , opt [ seqPos ] . mlen , opt [ seqPos ] . off ) ;
opt [ storeStart ] = opt [ seqPos ] ;
seqPos = ( seqPos > backDist ) ? seqPos - backDist : 0 ;
}
/* save sequences */
DEBUGLOG ( 6 , " sending selected sequences into seqStore " )
{ U32 storePos ;
for ( storePos = storeStart ; storePos < = storeEnd ; storePos + + ) {
U32 const llen = opt [ storePos ] . litlen ;
U32 const mlen = opt [ storePos ] . mlen ;
U32 const offCode = opt [ storePos ] . off ;
U32 const advance = llen + mlen ;
DEBUGLOG ( 6 , " considering seq starting at %zi, llen=%u, mlen=%u " ,
2018-12-22 00:19:44 +00:00
anchor - istart , ( unsigned ) llen , ( unsigned ) mlen ) ;
2018-05-29 21:07:25 +00:00
if ( mlen = = 0 ) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
assert ( storePos = = storeEnd ) ; /* must be last sequence */
ip = anchor + llen ; /* last "sequence" is a bunch of literals => don't progress anchor */
continue ; /* will finish */
}
/* repcodes update : like ZSTD_updateRep(), but update in place */
if ( offCode > = ZSTD_REP_NUM ) { /* full offset */
rep [ 2 ] = rep [ 1 ] ;
2017-11-09 20:46:34 +00:00
rep [ 1 ] = rep [ 0 ] ;
2018-05-29 21:07:25 +00:00
rep [ 0 ] = offCode - ZSTD_REP_MOVE ;
} else { /* repcode */
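/* note : when llen==0, repcode meanings are shifted by one :
 * offCode 0 then designates rep[1], and repCode==ZSTD_REP_NUM designates rep[0]-1 */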
U32 const repCode = offCode + ( llen = = 0 ) ;
if ( repCode ) { /* note : if repCode==0, no change */
U32 const currentOffset = ( repCode = = ZSTD_REP_NUM ) ? ( rep [ 0 ] - 1 ) : rep [ repCode ] ;
if ( repCode > = 2 ) rep [ 2 ] = rep [ 1 ] ;
rep [ 1 ] = rep [ 0 ] ;
rep [ 0 ] = currentOffset ;
} }
assert ( anchor + llen < = iend ) ;
ZSTD_updateStats ( optStatePtr , llen , anchor , offCode , mlen ) ;
2019-09-20 07:52:55 +00:00
ZSTD_storeSeq ( seqStore , llen , anchor , iend , offCode , mlen - MINMATCH ) ;
2018-05-29 21:07:25 +00:00
anchor + = advance ;
ip = anchor ;
} }
ZSTD_setBasePrices ( optStatePtr , optLevel ) ;
}
2017-09-02 01:28:35 +00:00
2017-11-21 18:36:14 +00:00
} /* while (ip < ilimit) */
2017-09-02 01:28:35 +00:00
2017-09-06 22:56:32 +00:00
/* Return the last literals size */
2019-05-28 23:11:32 +00:00
return ( size_t ) ( iend - anchor ) ;
2017-09-02 01:28:35 +00:00
}
2017-12-13 00:51:00 +00:00
size_t ZSTD_compressBlock_btopt (
ZSTD_matchState_t * ms , seqStore_t * seqStore , U32 rep [ ZSTD_REP_NUM ] ,
2018-08-23 18:53:34 +00:00
const void * src , size_t srcSize )
2017-09-02 01:28:35 +00:00
{
2017-11-16 20:18:56 +00:00
DEBUGLOG ( 5 , " ZSTD_compressBlock_btopt " ) ;
2018-08-23 18:53:34 +00:00
return ZSTD_compressBlock_opt_generic ( ms , seqStore , rep , src , srcSize , 0 /*optLevel*/ , ZSTD_noDict ) ;
2017-09-02 01:28:35 +00:00
}
2018-05-17 19:19:37 +00:00
2018-05-22 22:06:36 +00:00
/* used in 2-pass strategy */
2018-12-22 00:19:44 +00:00
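/* ZSTD_upscaleStat() :
 * multiply each cell of `table` by 2^(ZSTD_FREQ_DIV+bonus), minus one,
 * so that statistics collected during the first pass keep significant weight
 * against the symbols accumulated during the second pass.
 * example : with ZSTD_FREQ_DIV==4 and bonus==0, a cell of value 3 becomes (3<<4)-1 == 47.
 * @return : updated sum of all cells */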
static U32 ZSTD_upscaleStat ( unsigned * table , U32 lastEltIndex , int bonus )
2018-05-17 19:19:37 +00:00
{
U32 s , sum = 0 ;
2018-12-17 23:48:34 +00:00
assert ( ZSTD_FREQ_DIV + bonus > = 0 ) ;
for ( s = 0 ; s < lastEltIndex + 1 ; s + + ) {
2018-05-17 19:19:37 +00:00
table [ s ] < < = ZSTD_FREQ_DIV + bonus ;
table [ s ] - - ;
sum + = table [ s ] ;
}
return sum ;
}
2018-05-22 22:06:36 +00:00
/* used in 2-pass strategy */
MEM_STATIC void ZSTD_upscaleStats ( optState_t * optPtr )
2018-05-17 19:19:37 +00:00
{
2019-02-15 18:29:03 +00:00
if ( ZSTD_compressedLiterals ( optPtr ) )
optPtr - > litSum = ZSTD_upscaleStat ( optPtr - > litFreq , MaxLit , 0 ) ;
2018-12-18 20:32:58 +00:00
optPtr - > litLengthSum = ZSTD_upscaleStat ( optPtr - > litLengthFreq , MaxLL , 0 ) ;
optPtr - > matchLengthSum = ZSTD_upscaleStat ( optPtr - > matchLengthFreq , MaxML , 0 ) ;
optPtr - > offCodeSum = ZSTD_upscaleStat ( optPtr - > offCodeFreq , MaxOff , 0 ) ;
2018-05-17 19:19:37 +00:00
}
2018-12-17 23:48:34 +00:00
2018-12-06 21:38:09 +00:00
/* ZSTD_initStats_ultra():
* make a first compression pass , just to seed stats with more accurate starting values .
* only works on first block , with no dictionary and no ldm .
2019-04-12 18:18:11 +00:00
* this function cannot error , hence its contract must be respected .
2018-12-06 21:38:09 +00:00
*/
2018-12-17 23:48:34 +00:00
static void
ZSTD_initStats_ultra ( ZSTD_matchState_t * ms ,
seqStore_t * seqStore ,
U32 rep [ ZSTD_REP_NUM ] ,
const void * src , size_t srcSize )
2018-12-06 21:38:09 +00:00
{
U32 tmpRep [ ZSTD_REP_NUM ] ; /* updated rep codes will sink here */
2018-12-17 23:48:34 +00:00
memcpy ( tmpRep , rep , sizeof ( tmpRep ) ) ;
2018-12-06 21:38:09 +00:00
2018-12-20 00:54:15 +00:00
DEBUGLOG ( 4 , " ZSTD_initStats_ultra (srcSize=%zu) " , srcSize ) ;
2018-12-06 21:38:09 +00:00
assert ( ms - > opt . litLengthSum = = 0 ) ; /* first block */
assert ( seqStore - > sequences = = seqStore - > sequencesStart ) ; /* no ldm */
assert ( ms - > window . dictLimit = = ms - > window . lowLimit ) ; /* no dictionary */
2018-12-11 02:45:03 +00:00
assert ( ms - > window . dictLimit - ms - > nextToUpdate < = 1 ) ; /* no prefix (note: intentional overflow, defined as two's complement) */
2018-12-06 21:38:09 +00:00
ZSTD_compressBlock_opt_generic ( ms , seqStore , tmpRep , src , srcSize , 2 /*optLevel*/ , ZSTD_noDict ) ; /* generate stats into ms->opt*/
/* invalidate first scan from history */
ZSTD_resetSeqStore ( seqStore ) ;
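/* note : rewinding window.base while advancing dictLimit/lowLimit/nextToUpdate
 * makes this block look like the very start of a fresh prefix,
 * so the second pass re-parses it from scratch, keeping only the statistics */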
ms - > window . base - = srcSize ;
ms - > window . dictLimit + = ( U32 ) srcSize ;
ms - > window . lowLimit = ms - > window . dictLimit ;
ms - > nextToUpdate = ms - > window . dictLimit ;
/* reinforce weight of collected statistics */
ZSTD_upscaleStats ( & ms - > opt ) ;
}
2018-05-17 19:19:37 +00:00
2017-12-13 00:51:00 +00:00
size_t ZSTD_compressBlock_btultra (
ZSTD_matchState_t * ms , seqStore_t * seqStore , U32 rep [ ZSTD_REP_NUM ] ,
2018-08-23 18:53:34 +00:00
const void * src , size_t srcSize )
2017-09-02 01:28:35 +00:00
{
2018-12-18 21:40:07 +00:00
DEBUGLOG ( 5 , " ZSTD_compressBlock_btultra (srcSize=%zu) " , srcSize ) ;
2018-12-06 21:38:09 +00:00
return ZSTD_compressBlock_opt_generic ( ms , seqStore , rep , src , srcSize , 2 /*optLevel*/ , ZSTD_noDict ) ;
}
size_t ZSTD_compressBlock_btultra2 (
ZSTD_matchState_t * ms , seqStore_t * seqStore , U32 rep [ ZSTD_REP_NUM ] ,
const void * src , size_t srcSize )
{
2018-12-20 00:54:15 +00:00
U32 const current = ( U32 ) ( ( const BYTE * ) src - ms - > window . base ) ;
2018-12-18 21:40:07 +00:00
DEBUGLOG ( 5 , " ZSTD_compressBlock_btultra2 (srcSize=%zu) " , srcSize ) ;
2018-12-06 21:38:09 +00:00
2018-12-17 23:48:34 +00:00
/* 2-pass strategy:
2018-05-22 22:06:36 +00:00
* this strategy makes a first pass over the first block to collect statistics ,
* which are then used to seed the statistics of the second pass .
2018-12-11 02:45:03 +00:00
* After the 1st pass , the function forgets everything , and restarts the block from scratch .
* Consequently , this can only work if no data has been previously loaded in tables ,
* aka , no dictionary , no prefix , no ldm preprocessing .
2018-05-22 22:06:36 +00:00
* The compression ratio gain is generally small ( ~ 0.5 % on first block ) ,
* the cost is 2 x cpu time on first block . */
2018-05-17 23:13:53 +00:00
assert ( srcSize < = ZSTD_BLOCKSIZE_MAX ) ;
if ( ( ms - > opt . litLengthSum = = 0 ) /* first block */
2018-12-11 02:45:03 +00:00
& & ( seqStore - > sequences = = seqStore - > sequencesStart ) /* no ldm */
& & ( ms - > window . dictLimit = = ms - > window . lowLimit ) /* no dictionary */
2018-12-20 00:54:15 +00:00
& & ( current = = ms - > window . dictLimit ) /* start of frame, nothing already loaded nor skipped */
2018-12-18 20:32:58 +00:00
& & ( srcSize > ZSTD_PREDEF_THRESHOLD )
2018-12-11 02:45:03 +00:00
) {
2018-12-06 21:38:09 +00:00
ZSTD_initStats_ultra ( ms , seqStore , rep , src , srcSize ) ;
2018-05-17 19:19:37 +00:00
}
2018-12-06 21:38:09 +00:00
2018-08-23 18:53:34 +00:00
return ZSTD_compressBlock_opt_generic ( ms , seqStore , rep , src , srcSize , 2 /*optLevel*/ , ZSTD_noDict ) ;
2017-09-02 01:28:35 +00:00
}
2018-06-13 21:10:37 +00:00
size_t ZSTD_compressBlock_btopt_dictMatchState (
ZSTD_matchState_t * ms , seqStore_t * seqStore , U32 rep [ ZSTD_REP_NUM ] ,
2018-08-23 18:53:34 +00:00
const void * src , size_t srcSize )
2018-06-13 21:10:37 +00:00
{
2018-08-23 18:53:34 +00:00
return ZSTD_compressBlock_opt_generic ( ms , seqStore , rep , src , srcSize , 0 /*optLevel*/ , ZSTD_dictMatchState ) ;
2018-06-13 21:10:37 +00:00
}
size_t ZSTD_compressBlock_btultra_dictMatchState (
ZSTD_matchState_t * ms , seqStore_t * seqStore , U32 rep [ ZSTD_REP_NUM ] ,
2018-08-23 18:53:34 +00:00
const void * src , size_t srcSize )
2018-06-13 21:10:37 +00:00
{
2018-08-23 18:53:34 +00:00
return ZSTD_compressBlock_opt_generic ( ms , seqStore , rep , src , srcSize , 2 /*optLevel*/ , ZSTD_dictMatchState ) ;
2018-06-13 21:10:37 +00:00
}
2017-12-13 00:51:00 +00:00
size_t ZSTD_compressBlock_btopt_extDict (
ZSTD_matchState_t * ms , seqStore_t * seqStore , U32 rep [ ZSTD_REP_NUM ] ,
2018-08-23 18:53:34 +00:00
const void * src , size_t srcSize )
2017-09-02 01:28:35 +00:00
{
2018-08-23 18:53:34 +00:00
return ZSTD_compressBlock_opt_generic ( ms , seqStore , rep , src , srcSize , 0 /*optLevel*/ , ZSTD_extDict ) ;
2017-09-02 01:28:35 +00:00
}
2017-12-13 00:51:00 +00:00
size_t ZSTD_compressBlock_btultra_extDict (
ZSTD_matchState_t * ms , seqStore_t * seqStore , U32 rep [ ZSTD_REP_NUM ] ,
2018-08-23 18:53:34 +00:00
const void * src , size_t srcSize )
2017-09-02 01:28:35 +00:00
{
2018-08-23 18:53:34 +00:00
return ZSTD_compressBlock_opt_generic ( ms , seqStore , rep , src , srcSize , 2 /*optLevel*/ , ZSTD_extDict ) ;
2017-09-02 01:28:35 +00:00
}
2018-12-06 21:38:09 +00:00
/* note : no btultra2 variant for extDict nor dictMatchState,
2018-12-06 23:00:52 +00:00
* because btultra2 is not meant to work with dictionaries
2018-12-06 21:38:09 +00:00
* and is only used on the first block ( no prefix ) */