Optimized loop bounds to allow the compiler to unroll the loop.

This has no measurable impact on large files, but it improves small-file
decompression by ~1-2% on a 10 kB input, benchmarked with:

head -c 10000 silesia.tar > /tmp/test
make CC=/usr/local/bin/clang-9 BUILD_STATIC=1 && ./lzbench -ezstd -t1,5 /tmp/test
Clement Courbet 2019-11-05 14:14:02 +01:00
parent 332aade370
commit b3c9fc27b4

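In isolation, the pattern the diff below introduces is: split the table-fill loop into a short path whose trip count the compiler can see is tiny, and a 4x-unrolled path that is safe because the fill length is always a power of two. A minimal standalone sketch of that idea, with illustrative names (fill_entries is not a zstd function):

#include <stddef.h>
#include <stdint.h>

/* Fill dst[start .. start+length) with the value v.
 * Assumes length is either 0 or a power of two, as in the Huffman table fill. */
static void fill_entries(uint16_t* dst, size_t start, size_t length, uint16_t v)
{
    size_t u;
    if (length < 4) {
        /* Tiny trip count (0, 1 or 2): the compiler can fully unroll this. */
        for (u = 0; u < length; ++u)
            dst[start + u] = v;
    } else {
        /* length is a power of two >= 4, so stepping by 4 exactly covers the range. */
        size_t const end = start + length;
        for (u = start; u < end; u += 4) {
            dst[u + 0] = v;
            dst[u + 1] = v;
            dst[u + 2] = v;
            dst[u + 3] = v;
        }
    }
}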
@@ -181,17 +181,29 @@ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize
     /* fill DTable */
     {   U32 n;
-        for (n=0; n<nbSymbols; n++) {
-            U32 const w = huffWeight[n];
-            U32 const length = (1 << w) >> 1;
-            U32 u;
+        size_t const nEnd = nbSymbols;
+        for (n=0; n<nEnd; n++) {
+            size_t const w = huffWeight[n];
+            size_t const length = (1 << w) >> 1;
+            size_t const uStart = rankVal[w];
+            size_t const uEnd = uStart + length;
+            size_t u;
             HUF_DEltX1 D;
-            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
-            for (u = rankVal[w]; u < rankVal[w] + length; u++)
-                dt[u] = D;
-            rankVal[w] += length;
-    }   }
+            D.byte = (BYTE)n;
+            D.nbBits = (BYTE)(tableLog + 1 - w);
+            rankVal[w] = uEnd;
+            if (length < 4) {
+                /* Use length in the loop bound so the compiler knows it is short. */
+                for (u = 0; u < length; ++u)
+                    dt[uStart + u] = D;
+            } else {
+                /* Unroll the loop 4 times, we know it is a power of 2. */
+                for (u = uStart; u < uEnd; u += 4) {
+                    dt[u + 0] = D;
+                    dt[u + 1] = D;
+                    dt[u + 2] = D;
+                    dt[u + 3] = D;
+    }   }   }   }
     return iSize;
 }
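For context on why the 4x unroll never overruns: length is computed as (1 << w) >> 1, i.e. 0 for weight 0 and 2^(w-1) otherwise, so it is always zero or a power of two, and any value >= 4 is a multiple of 4. A standalone sanity check of that invariant (not part of the patch; the weight range here is purely illustrative):

#include <assert.h>
#include <stddef.h>

int main(void)
{
    unsigned w;
    for (w = 0; w <= 12; ++w) {   /* illustrative weight range */
        size_t const length = ((size_t)1 << w) >> 1;
        /* Zero or a power of two... */
        assert(length == 0 || (length & (length - 1)) == 0);
        /* ...so once it reaches 4 it is a multiple of 4 and the unrolled loop is exact. */
        assert(length < 4 || length % 4 == 0);
    }
    return 0;
}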