Make fullbench use the new BMK_benchCustom function

Rearrange benchmark function arguments
Add no-op (nothing) init function
Use the new function; change locals to match
New result display
Comment cleanup
Update builds (Makefile and VS projects)
George Lu 2018-06-14 14:46:17 -04:00
parent 20f4f32379
commit 8522346322
7 changed files with 167 additions and 160 deletions
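
As context for the summary above, here is a minimal sketch (not part of the commit) of the argument reordering and the no-op init function it mentions: fullbench's wrappers move from a (dst, dstSize, buff2, src, srcSize) order to the (src, srcSize, dst, dstSize, payload) order that BMK_benchCustom expects, and a do-nothing initFn is added for wrappers that need no setup. The typedef and function names below are hypothetical.

/* Sketch only (not part of the commit). oldBenchFn_t / newBenchFn_t /
 * noop_init are hypothetical names. */
#include <stddef.h>

/* previous fullbench wrapper shape: dst first, src last */
typedef size_t (*oldBenchFn_t)(void* dst, size_t dstSize, void* buff2,
                               const void* src, size_t srcSize);

/* new shape expected by BMK_benchCustom: src first, opaque payload last */
typedef size_t (*newBenchFn_t)(const void* src, size_t srcSize,
                               void* dst, size_t dstSize, void* payload);

/* BMK_benchCustom also takes an initFn; wrappers that need no setup pass
 * a do-nothing function like fullbench's new local_nothing */
static size_t noop_init(void* unused) { (void)unused; return 0; }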


@ -167,11 +167,13 @@
<ItemGroup>
<ClCompile Include="..\..\..\lib\common\xxhash.c" />
<ClCompile Include="..\..\..\programs\datagen.c" />
<ClCompile Include="..\..\..\programs\bench.c" />
<ClCompile Include="..\..\..\tests\fullbench.c" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\lib\zstd.h" />
<ClInclude Include="..\..\..\programs\datagen.h" />
<ClInclude Include="..\..\..\programs\bench.h" />
<ClInclude Include="..\..\..\programs\util.h" />
</ItemGroup>
<ItemGroup>


@ -174,6 +174,7 @@
<ClCompile Include="..\..\..\lib\decompress\huf_decompress.c" />
<ClCompile Include="..\..\..\lib\decompress\zstd_decompress.c" />
<ClCompile Include="..\..\..\programs\datagen.c" />
<ClCompile Include="..\..\..\programs\bench.c" />
<ClCompile Include="..\..\..\tests\fullbench.c" />
</ItemGroup>
<ItemGroup>
@ -195,6 +196,7 @@
<ClInclude Include="..\..\..\lib\legacy\zstd_legacy.h" />
<ClInclude Include="..\..\..\programs\datagen.h" />
<ClInclude Include="..\..\..\programs\util.h" />
<ClInclude Include="..\..\..\programs\bench.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">


@ -64,8 +64,7 @@
static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t)(1ULL << ((sizeof(size_t)*8)-31));
//TODO: remove this gv as well
//Only used in Synthetic test. Separate?
/* remove this in the future? */
static U32 g_compressibilityDefault = 50;
/* *************************************
@ -119,8 +118,8 @@ static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER;
BMK_advancedParams_t BMK_defaultAdvancedParams(void) {
BMK_advancedParams_t res = {
0, /* mode */
0, /* nbCycles */
BMK_both, /* mode */
BMK_timeMode, /* loopMode */
BMK_TIMETEST_DEFAULT_S, /* nbSeconds */
0, /* blockSize */
0, /* nbWorkers */
@ -266,18 +265,14 @@ static size_t local_defaultDecompress(
}
//ignore above for error stuff, return type still undecided
/* mode 0 : iter = # seconds, else iter = # cycles */
/* initFn will be measured once, bench fn will be measured x times */
/* benchFn should return error value or out Size */
//problem : how to get cSize this way for ratio?
//also possible fastest rounds down to 0 if 0 < loopDuration < nbLoops (that would mean <1ns / op though)
/* takes # of blocks and list of size & stuff for each. */
BMK_customReturn_t BMK_benchCustom(
const char* functionName, size_t blockCount,
const void* const * const srcBuffers, size_t* srcSizes,
void* const * const dstBuffers, size_t* dstSizes,
const void* const * const srcBuffers, const size_t* srcSizes,
void* const * const dstBuffers, const size_t* dstSizes,
size_t (*initFn)(void*), size_t (*benchFn)(const void*, size_t, void*, size_t, void*),
void* initPayload, void* benchPayload,
unsigned mode, unsigned iter,
@ -302,9 +297,7 @@ BMK_customReturn_t BMK_benchCustom(
/* display last 17 chars of functionName */
if (strlen(functionName)>17) functionName += strlen(functionName)-17;
if(!iter) {
if(mode) {
EXM_THROW(1, BMK_customReturn_t, "nbSeconds must be nonzero \n");
} else {
if(mode == BMK_iterMode) {
EXM_THROW(1, BMK_customReturn_t, "nbLoops must be nonzero \n");
}
@ -314,73 +307,81 @@ BMK_customReturn_t BMK_benchCustom(
srcSize += srcSizes[ind];
}
//change to switch if more modes?
if(!mode) {
int completed = 0;
U64 const maxTime = (iter * TIMELOOP_NANOSEC) + 1;
unsigned nbLoops = 1;
UTIL_time_t coolTime = UTIL_getTime();
while(!completed) {
switch(mode) {
case BMK_timeMode:
{
int completed = 0;
U64 const maxTime = (iter * TIMELOOP_NANOSEC) + 1;
unsigned nbLoops = 1;
UTIL_time_t coolTime = UTIL_getTime();
while(!completed) {
unsigned i, j;
/* Overheat protection */
if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
DISPLAYLEVEL(2, "\rcooling down ... \r");
UTIL_sleep(COOLPERIOD_SEC);
coolTime = UTIL_getTime();
}
for(i = 0; i < blockCount; i++) {
memset(dstBuffers[i], 0xD6, dstSizes[i]); /* warm up and erase result buffer */
}
clockStart = UTIL_getTime();
(*initFn)(initPayload);
for(i = 0; i < nbLoops; i++) {
for(j = 0; j < blockCount; j++) {
size_t res = (*benchFn)(srcBuffers[j], srcSizes[j], dstBuffers[j], dstSizes[j], benchPayload);
if(ZSTD_isError(res)) {
EXM_THROW(2, BMK_customReturn_t, "%s() failed on block %u of size %u : %s \n",
functionName, j, (U32)dstSizes[j], ZSTD_getErrorName(res));
} else if (toAdd) {
dstSize += res;
}
}
toAdd = 0;
}
{ U64 const loopDuration = UTIL_clockSpanNano(clockStart);
if (loopDuration > 0) {
fastest = MIN(fastest, loopDuration / nbLoops);
nbLoops = (U32)(TIMELOOP_NANOSEC / fastest) + 1;
} else {
assert(nbLoops < 40000000); /* avoid overflow */
nbLoops *= 100;
}
totalTime += loopDuration;
completed = (totalTime >= maxTime);
}
}
break;
}
case BMK_iterMode:
{
unsigned i, j;
/* Overheat protection */
if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
DISPLAYLEVEL(2, "\rcooling down ... \r");
UTIL_sleep(COOLPERIOD_SEC);
coolTime = UTIL_getTime();
}
for(i = 0; i < blockCount; i++) {
memset(dstBuffers[i], 0xD6, dstSizes[i]); /* warm up and erase result buffer */
}
clockStart = UTIL_getTime();
(*initFn)(initPayload);
for(i = 0; i < nbLoops; i++) {
for(i = 0; i < iter; i++) {
for(j = 0; j < blockCount; j++) {
size_t res = (*benchFn)(srcBuffers[j], srcSizes[j], dstBuffers[j], dstSizes[j], benchPayload);
if(ZSTD_isError(res)) {
EXM_THROW(2, BMK_customReturn_t, "%s() failed on block %u of size %u : %s \n",
functionName, j, (U32)dstSizes[j], ZSTD_getErrorName(res));
} else if (toAdd) {
} else if(toAdd) {
dstSize += res;
}
}
toAdd = 0;
}
{ U64 const loopDuration = UTIL_clockSpanNano(clockStart);
if (loopDuration > 0) {
fastest = MIN(fastest, loopDuration / nbLoops);
nbLoops = (U32)(TIMELOOP_NANOSEC / fastest) + 1;
} else {
assert(nbLoops < 40000000); /* avoid overflow */
nbLoops *= 100;
}
totalTime += loopDuration;
completed = (totalTime >= maxTime);
totalTime = UTIL_clockSpanNano(clockStart);
if(!totalTime) {
EXM_THROW(3, BMK_customReturn_t, "Cycle count (%u) too short to measure \n", iter);
} else {
fastest = totalTime / iter;
}
break;
}
} else {
unsigned i, j;
clockStart = UTIL_getTime();
for(i = 0; i < iter; i++) {
for(j = 0; j < blockCount; j++) {
size_t res = (*benchFn)(srcBuffers[j], srcSizes[j], dstBuffers[j], dstSizes[j], benchPayload);
if(ZSTD_isError(res)) {
EXM_THROW(2, BMK_customReturn_t, "%s() failed on block %u of size %u : %s \n",
functionName, j, (U32)dstSizes[j], ZSTD_getErrorName(res));
} else if(toAdd) {
dstSize += res;
}
}
toAdd = 0;
}
totalTime = UTIL_clockSpanNano(clockStart);
if(!totalTime) {
EXM_THROW(3, BMK_customReturn_t, "Cycle count (%u) too short to measure \n", iter);
} else {
fastest = totalTime / iter;
}
default:
EXM_THROW(4, BMK_customReturn_t, "Unknown Mode \n");
}
retval.error = 0;
retval.result.time = fastest;
@ -396,18 +397,18 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
int displayLevel, const char* displayName, const BMK_advancedParams_t* adv)
{
size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_DECODE_ONLY)) ? adv->blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
size_t const blockSize = ((adv->blockSize>=32 && (adv->mode != BMK_decodeOnly)) ? adv->blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
/* these are the blockTable parameters, just split up */
const void ** const srcPtrs = malloc(maxNbBlocks * sizeof(void*));
size_t* const srcSizes = malloc(maxNbBlocks * sizeof(size_t));
const void ** const srcPtrs = (const void ** const)malloc(maxNbBlocks * sizeof(void*));
size_t* const srcSizes = (size_t* const)malloc(maxNbBlocks * sizeof(size_t));
void ** const cPtrs = malloc(maxNbBlocks * sizeof(void*));
size_t* const cSizes = malloc(maxNbBlocks * sizeof(size_t));
void ** const cPtrs = (void** const)malloc(maxNbBlocks * sizeof(void*));
size_t* const cSizes = (size_t* const)malloc(maxNbBlocks * sizeof(size_t));
void ** const resPtrs = malloc(maxNbBlocks * sizeof(void*));
size_t* const resSizes = malloc(maxNbBlocks * sizeof(size_t));
void ** const resPtrs = (void** const)malloc(maxNbBlocks * sizeof(void*));
size_t* const resSizes = (size_t* const)malloc(maxNbBlocks * sizeof(size_t));
const size_t maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024); /* add some room for safety */
void* compressedBuffer = malloc(maxCompressedSize);
@ -430,7 +431,7 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
/* init */
if (strlen(displayName)>17) displayName += strlen(displayName)-17; /* display last 17 characters */
if (adv->mode == BMK_DECODE_ONLY) { /* benchmark only decompression : source must be already compressed */
if (adv->mode == BMK_decodeOnly) { /* benchmark only decompression : source must be already compressed */
const char* srcPtr = (const char*)srcBuffer;
U64 totalDSize64 = 0;
U32 fileNb;
@ -458,19 +459,18 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
U32 fileNb;
for (nbBlocks=0, fileNb=0; fileNb<nbFiles; fileNb++) {
size_t remaining = fileSizes[fileNb];
U32 const nbBlocksforThisFile = (adv->mode == BMK_DECODE_ONLY) ? 1 : (U32)((remaining + (blockSize-1)) / blockSize);
U32 const nbBlocksforThisFile = (adv->mode == BMK_decodeOnly) ? 1 : (U32)((remaining + (blockSize-1)) / blockSize);
U32 const blockEnd = nbBlocks + nbBlocksforThisFile;
for ( ; nbBlocks<blockEnd; nbBlocks++) {
size_t const thisBlockSize = MIN(remaining, blockSize);
srcPtrs[nbBlocks] = (const void*)srcPtr;
srcSizes[nbBlocks] = thisBlockSize;
cPtrs[nbBlocks] = (void*)cPtr;
cSizes[nbBlocks] = (adv->mode == BMK_DECODE_ONLY) ? thisBlockSize : ZSTD_compressBound(thisBlockSize);
//blockTable[nbBlocks].cSize = blockTable[nbBlocks].cRoom;
cSizes[nbBlocks] = (adv->mode == BMK_decodeOnly) ? thisBlockSize : ZSTD_compressBound(thisBlockSize);
resPtrs[nbBlocks] = (void*)resPtr;
resSizes[nbBlocks] = (adv->mode == BMK_DECODE_ONLY) ? (size_t) ZSTD_findDecompressedSize(srcPtr, thisBlockSize) : thisBlockSize;
resSizes[nbBlocks] = (adv->mode == BMK_decodeOnly) ? (size_t) ZSTD_findDecompressedSize(srcPtr, thisBlockSize) : thisBlockSize;
srcPtr += thisBlockSize;
cPtr += cSizes[nbBlocks]; //blockTable[nbBlocks].cRoom;
cPtr += cSizes[nbBlocks];
resPtr += thisBlockSize;
remaining -= thisBlockSize;
}
@ -478,26 +478,29 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
}
/* warming up memory */
if (adv->mode == BMK_DECODE_ONLY) {
if (adv->mode == BMK_decodeOnly) {
memcpy(compressedBuffer, srcBuffer, loadedCompressedSize);
} else {
RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1);
}
/* Bench */
//TODO: Make sure w/o new loop decode_only code isn't run
//TODO: Support nbLoops and nbSeconds
{
U64 const crcOrig = (adv->mode == BMK_DECODE_ONLY) ? 0 : XXH64(srcBuffer, srcSize, 0);
U64 const crcOrig = (adv->mode == BMK_decodeOnly) ? 0 : XXH64(srcBuffer, srcSize, 0);
# define NB_MARKS 4
const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
U32 markNb = 0;
DISPLAYLEVEL(2, "\r%79s\r", "");
if (adv->mode != BMK_DECODE_ONLY) {
BMK_initCCtxArgs cctxprep = { ctx, dictBuffer, dictBufferSize, cLevel, comprParams, adv };
if (adv->mode != BMK_decodeOnly) {
BMK_initCCtxArgs cctxprep;
BMK_customReturn_t compressionResults;
cctxprep.ctx = ctx;
cctxprep.dictBuffer = dictBuffer;
cctxprep.dictBufferSize = dictBufferSize;
cctxprep.cLevel = cLevel;
cctxprep.comprParams = comprParams;
cctxprep.adv = adv;
/* Compression */
DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)srcSize);
compressionResults = BMK_benchCustom("ZSTD_compress_generic", nbBlocks,
@ -524,11 +527,15 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
ratioAccuracy, ratio,
cSpeedAccuracy, compressionSpeed);
}
} /* if (adv->mode != BMK_DECODE_ONLY) */
{
BMK_initDCtxArgs dctxprep = { dctx, dictBuffer, dictBufferSize };
} /* if (adv->mode != BMK_decodeOnly) */
if(adv->mode != BMK_compressOnly) {
BMK_initDCtxArgs dctxprep;
BMK_customReturn_t decompressionResults;
dctxprep.dctx = dctx;
dctxprep.dictBuffer = dictBuffer;
dctxprep.dictBufferSize = dictBufferSize;
decompressionResults = BMK_benchCustom("ZSTD_decompress_generic", nbBlocks,
(const void * const *)cPtrs, cSizes, resPtrs, resSizes,
&local_initDCtx, &local_defaultDecompress,
@ -556,7 +563,8 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
/* CRC Checking */
{ U64 const crcCheck = XXH64(resultBuffer, srcSize, 0);
if ((adv->mode != BMK_DECODE_ONLY) && (crcOrig!=crcCheck)) {
/* adv->mode == 0 -> compress + decompress */
if ((adv->mode == BMK_both) && (crcOrig!=crcCheck)) {
size_t u;
DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck);
for (u=0; u<srcSize; u++) {
@ -602,13 +610,13 @@ BMK_return_t BMK_benchMemAdvanced(const void* srcBuffer, size_t srcSize,
DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName);
}
DISPLAYLEVEL(2, "%2i#\n", cLevel);
} /* Bench */
} /* Bench */
/* clean up */
free(compressedBuffer);
free(resultBuffer);
free(srcPtrs);
free((void*)srcPtrs);
free(srcSizes);
free(cPtrs);
free(cSizes);
@ -689,9 +697,13 @@ static BMK_returnPtr_t BMK_benchCLevel(const void* srcBuffer, size_t benchedSize
{
int l;
BMK_result_t* res = (BMK_result_t*)malloc(sizeof(BMK_result_t) * (cLevelLast - cLevel + 1));
BMK_returnPtr_t ret = { 0, res };
BMK_returnPtr_t ret;
const char* pch = strrchr(displayName, '\\'); /* Windows */
ret.error = 0;
ret.result = res;
if (!pch) pch = strrchr(displayName, '/'); /* Linux */
if (pch) displayName = pch+1;
@ -773,12 +785,13 @@ static BMK_returnSet_t BMK_benchFileTable(const char* const * const fileNamesTab
size_t benchedSize;
void* dictBuffer = NULL;
size_t dictBufferSize = 0;
size_t* const fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
size_t* const fileSizes = (size_t*)calloc(nbFiles, sizeof(size_t));
BMK_returnSet_t res;
U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles);
res.result.cLevel = cLevel;
res.result.cLevelLast = cLevelLast;
res.result.results = NULL;
if (!fileSizes) EXM_THROW(12, BMK_returnSet_t, "not enough memory for fileSizes");
/* Load dictionary */
@ -844,9 +857,10 @@ static BMK_returnSet_t BMK_benchFileTable(const char* const * const fileNamesTab
res.result.nbFiles = 1;
snprintf (mfName, sizeof(mfName), " %u files", nbFiles);
{
BMK_returnPtr_t errorOrPtr;
const char* const displayName = (nbFiles > 1) ? mfName : fileNamesTable[0];
res.result.results = (BMK_result_t**)malloc(sizeof(BMK_result_t*));
BMK_returnPtr_t errorOrPtr = BMK_benchCLevel(srcBuffer, benchedSize,
errorOrPtr = BMK_benchCLevel(srcBuffer, benchedSize,
fileSizes, nbFiles,
cLevel, cLevelLast, compressionParams,
dictBuffer, dictBufferSize,
@ -877,7 +891,8 @@ static BMK_returnSet_t BMK_syntheticTest(int cLevel, int cLevelLast, double comp
size_t benchedSize = 10000000;
void* const srcBuffer = malloc(benchedSize);
BMK_returnSet_t res;
res.result.results = malloc(sizeof(BMK_result_t*));
BMK_returnPtr_t errPtr;
res.result.results = (BMK_result_t**)calloc(1,sizeof(BMK_result_t*));
res.result.nbFiles = 1;
res.result.cLevel = cLevel;
res.result.cLevelLast = cLevelLast;
@ -889,7 +904,7 @@ static BMK_returnSet_t BMK_syntheticTest(int cLevel, int cLevelLast, double comp
/* Bench */
snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100));
BMK_returnPtr_t errPtr = BMK_benchCLevel(srcBuffer, benchedSize,
errPtr = BMK_benchCLevel(srcBuffer, benchedSize,
&benchedSize, 1,
cLevel, cLevelLast, compressionParams,
NULL, 0,
@ -901,7 +916,7 @@ static BMK_returnSet_t BMK_syntheticTest(int cLevel, int cLevelLast, double comp
res.result.results[0] = errPtr.result;
/* clean up */
free(srcBuffer);
free((void*)srcBuffer);
res.error = 0;
return res;
}
@ -947,7 +962,8 @@ BMK_result_t BMK_getResult(BMK_resultSet_t resultSet, unsigned fileIdx, int cLev
void BMK_freeResultSet(BMK_resultSet_t src) {
unsigned i;
for(i = 0; i <= src.nbFiles; i++) {
if(src.results == NULL) { return; }
for(i = 0; i < src.nbFiles; i++) {
free(src.results[i]);
}
free(src.results);
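
For readability, a standalone sketch of the adaptive timing used by the BMK_timeMode branch above: run the measured function in batches, track the fastest per-call time, and size the next batch from it until roughly iter seconds have elapsed. now_ns() and dummy_work() are stand-ins for the real clock and benchFn, and TIMELOOP_NANOSEC is assumed here to be one second in nanoseconds.

/* Standalone sketch of the BMK_timeMode calibration above (assumptions noted in the lead-in). */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define TIMELOOP_NANOSEC (1ULL * 1000 * 1000 * 1000)   /* 1 second */

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void dummy_work(void)          /* stands in for the measured benchFn */
{
    volatile unsigned x = 0;
    unsigned i;
    for (i = 0; i < 1000; i++) x += i;
}

int main(void)
{
    unsigned const iter = 1;                           /* seconds to spend measuring */
    uint64_t const maxTime = iter * TIMELOOP_NANOSEC + 1;
    uint64_t totalTime = 0;
    uint64_t fastest = (uint64_t)-1;                   /* best observed ns per call */
    unsigned nbLoops = 1;
    while (totalTime < maxTime) {
        uint64_t const start = now_ns();
        unsigned i;
        for (i = 0; i < nbLoops; i++) dummy_work();
        {   uint64_t const loopDuration = now_ns() - start;
            if (loopDuration > 0) {
                uint64_t perCall = loopDuration / nbLoops;
                if (perCall == 0) perCall = 1;          /* guard against div by zero below */
                if (perCall < fastest) fastest = perCall;
                nbLoops = (unsigned)(TIMELOOP_NANOSEC / fastest) + 1;  /* size next batch to ~1s */
            } else {
                nbLoops *= 100;                         /* clock too coarse: do more work */
            }
            totalTime += loopDuration;
        }
    }
    printf("fastest: %llu ns per call\n", (unsigned long long)fastest);
    return 0;
}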


@ -19,11 +19,16 @@ extern "C" {
#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
#include "zstd.h" /* ZSTD_compressionParameters */
#define BMK_COMPRESS_ONLY 2
#define BMK_DECODE_ONLY 1
typedef enum {
BMK_timeMode = 0,
BMK_iterMode = 1
} BMK_loopMode_t;
#define TIME_MODE = 0
#define ITER_MODE = 1
typedef enum {
BMK_both = 0,
BMK_decodeOnly = 1,
BMK_compressOnly = 2
} BMK_mode_t;
#define ERROR_STRUCT(baseType, typeName) typedef struct { \
int error; \
@ -55,8 +60,8 @@ ERROR_STRUCT(BMK_customResult_t, BMK_customReturn_t);
/* want all 0 to be default, but wb ldmBucketSizeLog/ldmHashEveryLog */
typedef struct {
unsigned mode; /* 0: all, 1: compress only 2: decode only */
int loopMode; /* if loopmode, then nbSeconds = nbLoops */
BMK_mode_t mode; /* 0: both, 1: decode only, 2: compress only */
BMK_loopMode_t loopMode; /* if BMK_iterMode, nbSeconds is interpreted as nbLoops */
unsigned nbSeconds; /* default timing is in nbSeconds. If nbCycles != 0 then use that */
size_t blockSize; /* Maximum allowable size of a block*/
unsigned nbWorkers; /* multithreading */
@ -91,8 +96,8 @@ BMK_advancedParams_t BMK_defaultAdvancedParams(void);
* .result will contain the speed (B/s) and time per loop (ns)
*/
BMK_customReturn_t BMK_benchCustom(const char* functionName, size_t blockCount,
const void* const * const srcBuffers, size_t* srcSizes,
void* const * const dstBuffers, size_t* dstSizes,
const void* const * const srcBuffers, const size_t* srcSizes,
void* const * const dstBuffers, const size_t* dstSizes,
size_t (*initFn)(void*), size_t (*benchFn)(const void*, size_t, void*, size_t, void*),
void* initPayload, void* benchPayload,
unsigned mode, unsigned iter,
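
A hedged usage sketch of the declaration above: one block benchmarked in time mode with a memcpy-based benchFn. Everything named my_* or bench_memcpy is illustrative; the .error and .result.time fields are the ones used in the bench.c and fullbench.c hunks, and the trailing argument is copied from the fullbench.c call further below because the header hunk is cut off before it.

/* Hedged usage sketch of BMK_benchCustom as declared above; caller,
 * wrapper, and buffer names are illustrative, not part of the commit. */
#include <stdio.h>
#include <string.h>
#include "bench.h"

static size_t my_init(void* payload) { (void)payload; return 0; }   /* no setup needed */

static size_t my_copy(const void* src, size_t srcSize,
                      void* dst, size_t dstCapacity, void* payload)
{
    (void)payload; (void)dstCapacity;
    memcpy(dst, src, srcSize);
    return srcSize;                      /* benchFn returns out size (or an error code) */
}

static void bench_memcpy(const void* srcBuffer, size_t srcSize,
                         void* dstBuffer, size_t dstCapacity)
{
    const void* srcs[1]  = { srcBuffer };
    size_t srcSizes[1]   = { srcSize };
    void* dsts[1]        = { dstBuffer };
    size_t dstSizes[1]   = { dstCapacity };
    BMK_customReturn_t r = BMK_benchCustom("memcpy", 1,
            srcs, srcSizes, dsts, dstSizes,
            my_init, my_copy, NULL, NULL,
            BMK_timeMode, 2 /* seconds in time mode */,
            2 /* trailing arg as in the fullbench.c call below; not shown in this hunk */);
    if (r.error) return;
    /* r.result.time is ns per loop over all blocks, per the comment above */
    printf("%.1f MB/s\n", (double)srcSize / r.result.time * 1000);
}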


@ -608,7 +608,7 @@ int main(int argCount, const char* argv[])
/* Decoding */
case 'd':
#ifndef ZSTD_NOBENCH
adv.mode = BMK_DECODE_ONLY;
adv.mode = BMK_decodeOnly;
if (operation==zom_bench) { argument++; break; } /* benchmark decode (hidden option) */
#endif
operation=zom_decompress; argument++; break;
@ -816,7 +816,7 @@ int main(int argCount, const char* argv[])
if (g_ldmHashEveryLog != LDM_PARAM_DEFAULT) {
adv.ldmHashEveryLog = g_ldmHashEveryLog;
}
BMK_benchFilesAdvanced(filenameTable, filenameIdx, dictFileName, cLevel, cLevelLast, &compressionParams, g_displayLevel, &adv);
BMK_freeResultSet(BMK_benchFilesAdvanced(filenameTable, filenameIdx, dictFileName, cLevel, cLevelLast, &compressionParams, g_displayLevel, &adv).result);
#else
(void)bench_nbSeconds; (void)blockSize; (void)setRealTimePrio; (void)separateFiles;
#endif
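
The zstdcli.c change above frees the benchmark result set as soon as it is returned. As a sketch, the same call split into two steps for a caller that wants to look at the results first (a fragment meant for main(), where the variables shown in the hunk are in scope):

/* Sketch only: two-step form of the single call above. */
BMK_returnSet_t ret = BMK_benchFilesAdvanced(filenameTable, filenameIdx,
                        dictFileName, cLevel, cLevelLast,
                        &compressionParams, g_displayLevel, &adv);
if (!ret.error) {
    /* results stay valid here (e.g. readable via BMK_getResult) until freed */
}
BMK_freeResultSet(ret.result);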


@ -133,7 +133,7 @@ fullbench fullbench32 : CPPFLAGS += $(MULTITHREAD_CPP)
fullbench fullbench32 : LDFLAGS += $(MULTITHREAD_LD)
fullbench fullbench32 : DEBUGFLAGS = # turn off assert() for speed measurements
fullbench fullbench32 : $(ZSTD_FILES)
fullbench fullbench32 : $(PRGDIR)/datagen.c fullbench.c
fullbench fullbench32 : $(PRGDIR)/datagen.c $(PRGDIR)/bench.c fullbench.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
fullbench-lib : zstd-staticLib


@ -30,6 +30,7 @@
#include "zstd.h" /* ZSTD_versionString */
#include "util.h" /* time functions */
#include "datagen.h"
#include "bench.h" /* CustomBench*/
/*_************************************
@ -93,14 +94,19 @@ static size_t BMK_findMaxMem(U64 requiredMem)
/*_*******************************************************
* Benchmark wrappers
*********************************************************/
size_t local_ZSTD_compress(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
size_t local_nothing(void* x) {
(void)x;
return 0;
}
size_t local_ZSTD_compress(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
(void)buff2;
return ZSTD_compress(dst, dstSize, src, srcSize, 1);
}
static size_t g_cSize = 0;
size_t local_ZSTD_decompress(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
size_t local_ZSTD_decompress(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
(void)src; (void)srcSize;
return ZSTD_decompress(dst, dstSize, buff2, g_cSize);
@ -110,14 +116,14 @@ static ZSTD_DCtx* g_zdc = NULL;
#ifndef ZSTD_DLL_IMPORT
extern size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
size_t local_ZSTD_decodeLiteralsBlock(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
size_t local_ZSTD_decodeLiteralsBlock(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
(void)src; (void)srcSize; (void)dst; (void)dstSize;
return ZSTD_decodeLiteralsBlock((ZSTD_DCtx*)g_zdc, buff2, g_cSize);
}
extern size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeq, const void* src, size_t srcSize);
size_t local_ZSTD_decodeSeqHeaders(void* dst, size_t dstSize, void* buff2, const void* src, size_t srcSize)
size_t local_ZSTD_decodeSeqHeaders(const void* src, size_t srcSize, void* dst, size_t dstSize, void* buff2)
{
int nbSeq;
(void)src; (void)srcSize; (void)dst; (void)dstSize;
@ -126,7 +132,7 @@ size_t local_ZSTD_decodeSeqHeaders(void* dst, size_t dstSize, void* buff2, const
#endif
static ZSTD_CStream* g_cstream= NULL;
size_t local_ZSTD_compressStream(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
size_t local_ZSTD_compressStream(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
@ -143,7 +149,7 @@ size_t local_ZSTD_compressStream(void* dst, size_t dstCapacity, void* buff2, con
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_end(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
static size_t local_ZSTD_compress_generic_end(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
@ -159,7 +165,7 @@ static size_t local_ZSTD_compress_generic_end(void* dst, size_t dstCapacity, voi
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_continue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
static size_t local_ZSTD_compress_generic_continue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
@ -176,7 +182,7 @@ static size_t local_ZSTD_compress_generic_continue(void* dst, size_t dstCapacity
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_T2_end(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
static size_t local_ZSTD_compress_generic_T2_end(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
@ -193,7 +199,7 @@ static size_t local_ZSTD_compress_generic_T2_end(void* dst, size_t dstCapacity,
return buffOut.pos;
}
static size_t local_ZSTD_compress_generic_T2_continue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
static size_t local_ZSTD_compress_generic_T2_continue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
@ -212,7 +218,7 @@ static size_t local_ZSTD_compress_generic_T2_continue(void* dst, size_t dstCapac
}
static ZSTD_DStream* g_dstream= NULL;
static size_t local_ZSTD_decompressStream(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
static size_t local_ZSTD_decompressStream(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
ZSTD_outBuffer buffOut;
ZSTD_inBuffer buffIn;
@ -231,7 +237,7 @@ static size_t local_ZSTD_decompressStream(void* dst, size_t dstCapacity, void* b
static ZSTD_CCtx* g_zcc = NULL;
#ifndef ZSTD_DLL_IMPORT
size_t local_ZSTD_compressContinue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
size_t local_ZSTD_compressContinue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
(void)buff2;
ZSTD_compressBegin(g_zcc, 1 /* compressionLevel */);
@ -239,7 +245,7 @@ size_t local_ZSTD_compressContinue(void* dst, size_t dstCapacity, void* buff2, c
}
#define FIRST_BLOCK_SIZE 8
size_t local_ZSTD_compressContinue_extDict(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
size_t local_ZSTD_compressContinue_extDict(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
BYTE firstBlockBuf[FIRST_BLOCK_SIZE];
@ -255,7 +261,7 @@ size_t local_ZSTD_compressContinue_extDict(void* dst, size_t dstCapacity, void*
return ZSTD_compressEnd(g_zcc, dst, dstCapacity, (const BYTE*)src + FIRST_BLOCK_SIZE, srcSize - FIRST_BLOCK_SIZE);
}
size_t local_ZSTD_decompressContinue(void* dst, size_t dstCapacity, void* buff2, const void* src, size_t srcSize)
size_t local_ZSTD_decompressContinue(const void* src, size_t srcSize, void* dst, size_t dstCapacity, void* buff2)
{
size_t regeneratedSize = 0;
const BYTE* ip = (const BYTE*)buff2;
@ -288,8 +294,7 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
size_t const dstBuffSize = ZSTD_compressBound(srcSize);
void* buff2;
const char* benchName;
size_t (*benchFunction)(void* dst, size_t dstSize, void* verifBuff, const void* src, size_t srcSize);
double bestTime = 100000000.;
size_t (*benchFunction)(const void* src, size_t srcSize, void* dst, size_t dstSize, void* verifBuff);
/* Selection */
switch(benchNb)
@ -419,45 +424,22 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
default : ;
}
/* warming up memory */
{ size_t i; for (i=0; i<dstBuffSize; i++) dstBuff[i]=(BYTE)i; }
/* benchmark loop */
{ U32 loopNb;
U32 nbRounds = (U32)((50 MB) / (srcSize+1)) + 1; /* initial conservative speed estimate */
# define TIME_SEC_MICROSEC (1*1000000ULL) /* 1 second */
# define TIME_SEC_NANOSEC (1*1000000000ULL) /* 1 second */
DISPLAY("%2i- %-30.30s : \r", benchNb, benchName);
for (loopNb = 1; loopNb <= g_nbIterations; loopNb++) {
UTIL_time_t clockStart;
size_t benchResult=0;
U32 roundNb;
UTIL_sleepMilli(5); /* give processor time to other processes */
UTIL_waitForNextTick();
clockStart = UTIL_getTime();
for (roundNb=0; roundNb < nbRounds; roundNb++) {
benchResult = benchFunction(dstBuff, dstBuffSize, buff2, src, srcSize);
if (ZSTD_isError(benchResult)) {
DISPLAY("ERROR ! %s() => %s !! \n", benchName, ZSTD_getErrorName(benchResult));
exit(1);
} }
{ U64 const clockSpanNano = UTIL_clockSpanNano(clockStart);
double const averageTime = (double)clockSpanNano / TIME_SEC_NANOSEC / nbRounds;
if (clockSpanNano > 0) {
if (averageTime < bestTime) bestTime = averageTime;
assert(bestTime > (1./2000000000));
nbRounds = (U32)(1. / bestTime); /* aim for 1 sec */
DISPLAY("%2i- %-30.30s : %7.1f MB/s (%9u)\r",
loopNb, benchName,
(double)srcSize / (1 MB) / bestTime,
(U32)benchResult);
} else {
assert(nbRounds < 40000000); /* avoid overflow */
nbRounds *= 100;
}
} } }
DISPLAY("%2u\n", benchNb);
/* benchmark loop */
{
BMK_customReturn_t r = BMK_benchCustom(benchName, 1, &src, &srcSize, (void * const * const)&dstBuff, &dstBuffSize, &local_nothing, benchFunction,
NULL, buff2, BMK_timeMode, 1, 2);
if(r.error) {
DISPLAY("ERROR %d ! ! \n", r.error);
exit(1);
}
DISPLAY("%2u#Speed: %f MB/s - Size: %f MB\n", benchNb, (double)srcSize / r.result.time * 1000, (double)r.result.size / 1000000);
}
_cleanOut:
free(dstBuff);
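
A short sketch of the unit conversion behind the new display line in the hunk above: r.result.time is nanoseconds per loop, so dividing the processed bytes by it and multiplying by 1000 yields decimal MB/s. The helper below is hypothetical and simply restates the DISPLAY call with the conversion spelled out.

/* Sketch of the unit conversion used by the new display line; the helper
 * name printSpeed is hypothetical.
 *   MB/s = bytes / (ns * 1e-9) / 1e6 = bytes / ns * 1000
 */
#include <stdio.h>

static void printSpeed(unsigned benchNb, double srcSizeBytes, double timeNs, double outBytes)
{
    double const mbps  = srcSizeBytes / timeNs * 1000.0;   /* decimal MB per second */
    double const outMB = outBytes / 1000000.0;             /* produced size in decimal MB */
    printf("%2u#Speed: %f MB/s - Size: %f MB\n", benchNb, mbps, outMB);
}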