modified util::time API for easier invocation
- no longer exposes the frequency timer: it is either useless, or stored internally in a static variable (init is only needed once)
- UTIL_getTime() provides its result by function return
parent 5bbb465d3e
commit c95c0c9725
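The call-site change is mechanical. A minimal before/after sketch of the calling convention (illustrative only; all identifiers are the ones appearing in the diff below):

    /* before : caller owns a frequency value; init and query are separate steps */
    UTIL_freq_t ticksPerSecond;
    UTIL_time_t clockStart;
    UTIL_initTimer(&ticksPerSecond);
    UTIL_getTime(&clockStart);
    /* ... measured work ... */
    { U64 const elapsedMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond); (void)elapsedMicro; }

    /* after : the frequency lives in an internal static, initialized lazily on first use */
    { UTIL_time_t const clockStart = UTIL_getTime();
      /* ... measured work ... */
      U64 const elapsedMicro = UTIL_clockSpanMicro(clockStart);
      (void)elapsedMicro; }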
@@ -171,7 +171,6 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
     size_t cSize = 0;
     double ratio = 0.;
     U32 nbBlocks;
-    UTIL_freq_t ticksPerSecond;

     /* checks */
     if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
@@ -179,7 +178,6 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,

     /* init */
     if (strlen(displayName)>17) displayName += strlen(displayName)-17;   /* display last 17 characters */
-    UTIL_initTimer(&ticksPerSecond);

     if (g_decodeOnly) {  /* benchmark only decompression : source must be already compressed */
         const char* srcPtr = (const char*)srcBuffer;
@@ -239,15 +237,15 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
         const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
         U32 markNb = 0;

-        UTIL_getTime(&coolTime);
+        coolTime = UTIL_getTime();
         DISPLAYLEVEL(2, "\r%79s\r", "");
         while (!cCompleted || !dCompleted) {

             /* overheat protection */
-            if (UTIL_clockSpanMicro(coolTime, ticksPerSecond) > ACTIVEPERIOD_MICROSEC) {
+            if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
                 DISPLAYLEVEL(2, "\rcooling down ... \r");
                 UTIL_sleep(COOLPERIOD_SEC);
-                UTIL_getTime(&coolTime);
+                coolTime = UTIL_getTime();
             }

             if (!g_decodeOnly) {
@@ -257,8 +255,8 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
                 if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize);  /* warm up and erase result buffer */

                 UTIL_sleepMilli(1);  /* give processor time to other processes */
-                UTIL_waitForNextTick(ticksPerSecond);
-                UTIL_getTime(&clockStart);
+                UTIL_waitForNextTick();
+                clockStart = UTIL_getTime();

                 if (!cCompleted) {   /* still some time to do compression tests */
                     U64 const clockLoop = g_nbSeconds ? TIMELOOP_MICROSEC : 1;
@@ -330,9 +328,9 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
                             blockTable[blockNb].cSize = rSize;
                         }
                         nbLoops++;
-                    } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                     ZSTD_freeCDict(cdict);
-                    { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+                    { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart);
                         if (clockSpanMicro < fastestC*nbLoops) fastestC = clockSpanMicro / nbLoops;
                         totalCTime += clockSpanMicro;
                         cCompleted = (totalCTime >= maxTime);
@@ -357,15 +355,14 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
                 if (!dCompleted) memset(resultBuffer, 0xD6, srcSize);  /* warm result buffer */

                 UTIL_sleepMilli(1);  /* give processor time to other processes */
-                UTIL_waitForNextTick(ticksPerSecond);
+                UTIL_waitForNextTick();

                 if (!dCompleted) {
                     U64 clockLoop = g_nbSeconds ? TIMELOOP_MICROSEC : 1;
                     U32 nbLoops = 0;
-                    UTIL_time_t clockStart;
                     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuffer, dictBufferSize);
+                    UTIL_time_t const clockStart = UTIL_getTime();
                     if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure");
-                    UTIL_getTime(&clockStart);
                     do {
                         U32 blockNb;
                         for (blockNb=0; blockNb<nbBlocks; blockNb++) {
@@ -380,9 +377,9 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
                             blockTable[blockNb].resSize = regenSize;
                         }
                         nbLoops++;
-                    } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                     ZSTD_freeDDict(ddict);
-                    { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+                    { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart);
                         if (clockSpanMicro < fastestD*nbLoops) fastestD = clockSpanMicro / nbLoops;
                         totalDTime += clockSpanMicro;
                         dCompleted = (totalDTime >= maxTime);
@@ -118,38 +118,64 @@ static int g_utilDisplayLevel;
 * Time functions
 ******************************************/
 #if defined(_WIN32)   /* Windows */
-    typedef LARGE_INTEGER UTIL_freq_t;
     typedef LARGE_INTEGER UTIL_time_t;
-    UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* ticksPerSecond) { if (!QueryPerformanceFrequency(ticksPerSecond)) UTIL_DISPLAYLEVEL(1, "ERROR: QueryPerformance not present\n"); }
-    UTIL_STATIC void UTIL_getTime(UTIL_time_t* x) { QueryPerformanceCounter(x); }
-    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; }
-    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; }
+    UTIL_STATIC UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; }
+    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+    {
+        static LARGE_INTEGER ticksPerSecond = 0;
+        if (!ticksPerSecond) {
+            if (!QueryPerformanceFrequency(&ticksPerSecond))
+                UTIL_DISPLAYLEVEL(1, "ERROR: QueryPerformanceFrequency() failure\n");
+        }
+        return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart;
+    }
+    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+    {
+        static LARGE_INTEGER ticksPerSecond = 0;
+        if (!ticksPerSecond) {
+            if (!QueryPerformanceFrequency(&ticksPerSecond))
+                UTIL_DISPLAYLEVEL(1, "ERROR: QueryPerformanceFrequency() failure\n");
+        }
+        return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart;
+    }
 #elif defined(__APPLE__) && defined(__MACH__)
     #include <mach/mach_time.h>
-    typedef mach_timebase_info_data_t UTIL_freq_t;
     typedef U64 UTIL_time_t;
-    UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* rate) { mach_timebase_info(rate); }
-    UTIL_STATIC void UTIL_getTime(UTIL_time_t* x) { *x = mach_absolute_time(); }
-    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t rate, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom))/1000ULL; }
-    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t rate, UTIL_time_t clockStart, UTIL_time_t clockEnd) { return ((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom); }
+    UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); }
+    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+    {
+        static mach_timebase_info_data_t rate;
+        static int init = 0;
+        if (!init) {
+            mach_timebase_info(&rate);
+            init = 1;
+        }
+        return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom))/1000ULL;
+    }
+    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+    {
+        static mach_timebase_info_data_t rate;
+        static int init = 0;
+        if (!init) {
+            mach_timebase_info(&rate);
+            init = 1;
+        }
+        return ((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom);
+    }
 #elif (PLATFORM_POSIX_VERSION >= 200112L)
     #include <time.h>
-    typedef struct timespec UTIL_freq_t;
     typedef struct timespec UTIL_time_t;
-    UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* res)
+    UTIL_STATIC UTIL_time_t UTIL_getTime(void)
     {
-        if (clock_getres(CLOCK_MONOTONIC, res))
-            UTIL_DISPLAYLEVEL(1, "ERROR: Failed to init clock\n");
+        UTIL_time_t time;
+        if (clock_gettime(CLOCK_MONOTONIC, &time))
+            UTIL_DISPLAYLEVEL(1, "ERROR: Failed to get time\n");   /* we could also exit() */
+        return time;
     }
-    UTIL_STATIC void UTIL_getTime(UTIL_time_t* time)
-    {
-        if (clock_gettime(CLOCK_MONOTONIC, time))
-            UTIL_DISPLAYLEVEL(1, "ERROR: Failed to get time\n");
-    }
-    UTIL_STATIC UTIL_time_t UTIL_getSpanTime(UTIL_freq_t res, UTIL_time_t begin, UTIL_time_t end)
+    UTIL_STATIC UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end)
     {
         UTIL_time_t diff;
-        (void)res;
         if (end.tv_nsec < begin.tv_nsec) {
             diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec;
             diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec;
@@ -159,48 +185,45 @@ static int g_utilDisplayLevel;
         }
         return diff;
     }
-    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t res, UTIL_time_t begin, UTIL_time_t end)
+    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end)
     {
-        UTIL_time_t const diff = UTIL_getSpanTime(res, begin, end);
+        UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
         U64 micro = 0;
         micro += 1000000ULL * diff.tv_sec;
         micro += diff.tv_nsec / 1000ULL;
         return micro;
     }
-    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t res, UTIL_time_t begin, UTIL_time_t end)
+    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end)
     {
-        UTIL_time_t const diff = UTIL_getSpanTime(res, begin, end);
+        UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
         U64 nano = 0;
         nano += 1000000000ULL * diff.tv_sec;
         nano += diff.tv_nsec;
         return nano;
     }
 #else   /* relies on standard C (note : clock_t measurements can be wrong when using multi-threading) */
-    typedef clock_t UTIL_freq_t;
     typedef clock_t UTIL_time_t;
-    UTIL_STATIC void UTIL_initTimer(UTIL_freq_t* ticksPerSecond) { *ticksPerSecond=0; }
-    UTIL_STATIC void UTIL_getTime(UTIL_time_t* x) { *x = clock(); }
-    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { (void)ticksPerSecond; return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
-    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_freq_t ticksPerSecond, UTIL_time_t clockStart, UTIL_time_t clockEnd) { (void)ticksPerSecond; return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
+    UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return clock(); }
+    UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
+    UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
 #endif


 /* returns time span in microseconds */
-UTIL_STATIC U64 UTIL_clockSpanMicro( UTIL_time_t clockStart, UTIL_freq_t ticksPerSecond )
+UTIL_STATIC U64 UTIL_clockSpanMicro( UTIL_time_t clockStart )
 {
-    UTIL_time_t clockEnd;
-    UTIL_getTime(&clockEnd);
-    return UTIL_getSpanTimeMicro(ticksPerSecond, clockStart, clockEnd);
+    UTIL_time_t const clockEnd = UTIL_getTime();
+    return UTIL_getSpanTimeMicro(clockStart, clockEnd);
 }


-UTIL_STATIC void UTIL_waitForNextTick(UTIL_freq_t ticksPerSecond)
+UTIL_STATIC void UTIL_waitForNextTick()
 {
-    UTIL_time_t clockStart, clockEnd;
-    UTIL_getTime(&clockStart);
+    UTIL_time_t const clockStart = UTIL_getTime();
+    UTIL_time_t clockEnd;
     do {
-        UTIL_getTime(&clockEnd);
-    } while (UTIL_getSpanTimeNano(ticksPerSecond, clockStart, clockEnd) == 0);
+        clockEnd = UTIL_getTime();
+    } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0);
 }
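With this hunk the exposed surface reduces to UTIL_getTime(), UTIL_getSpanTimeMicro(), UTIL_getSpanTimeNano(), UTIL_clockSpanMicro() and UTIL_waitForNextTick(). A minimal caller-side sketch against the new API (a standalone illustration, not part of the commit; doWork() is a hypothetical workload):

    #include <stdio.h>
    #include "util.h"   /* new-style API : no UTIL_freq_t, no UTIL_initTimer() */

    extern void doWork(void);   /* hypothetical function to measure */

    static void timeOneRun(void)
    {
        UTIL_waitForNextTick();   /* align the measurement on a tick boundary */
        {   UTIL_time_t const clockStart = UTIL_getTime();
            doWork();
            {   U64 const elapsedMicro = UTIL_clockSpanMicro(clockStart);
                printf("%llu us\n", (unsigned long long)elapsedMicro);
        }   }
    }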
@@ -422,8 +422,6 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
     {   U32 loopNb;
 #       define TIME_SEC_MICROSEC     (1*1000000ULL) /* 1 second */
         U64 const clockLoop = TIMELOOP_S * TIME_SEC_MICROSEC;
-        UTIL_freq_t ticksPerSecond;
-        UTIL_initTimer(&ticksPerSecond);
         DISPLAY("%2i- %-30.30s : \r", benchNb, benchName);
         for (loopNb = 1; loopNb <= g_nbIterations; loopNb++) {
             UTIL_time_t clockStart;
@@ -431,13 +429,13 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
             U32 nbRounds;

             UTIL_sleepMilli(1);  /* give processor time to other processes */
-            UTIL_waitForNextTick(ticksPerSecond);
-            UTIL_getTime(&clockStart);
-            for (nbRounds=0; UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop; nbRounds++) {
+            UTIL_waitForNextTick();
+            clockStart = UTIL_getTime();
+            for (nbRounds=0; UTIL_clockSpanMicro(clockStart) < clockLoop; nbRounds++) {
                 benchResult = benchFunction(dstBuff, dstBuffSize, buff2, src, srcSize);
                 if (ZSTD_isError(benchResult)) { DISPLAY("ERROR ! %s() => %s !! \n", benchName, ZSTD_getErrorName(benchResult)); exit(1); }
             }
-            { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+            { U64 const clockSpanMicro = UTIL_clockSpanMicro(clockStart);
                 double const averageTime = (double)clockSpanMicro / TIME_SEC_MICROSEC / nbRounds;
                 if (averageTime < bestTime) bestTime = averageTime;
                 DISPLAY("%2i- %-30.30s : %7.1f MB/s (%9u)\r", loopNb, benchName, (double)srcSize / (1 MB) / bestTime, (U32)benchResult);
@@ -161,7 +161,6 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
     ZSTD_CCtx* const ctx = ZSTD_createCCtx();
     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
     U32 nbBlocks;
-    UTIL_freq_t ticksPerSecond;

     /* checks */
     if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
@@ -169,7 +168,6 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,

     /* init */
     if (strlen(displayName)>17) displayName += strlen(displayName)-17;   /* can only display 17 characters */
-    UTIL_initTimer(&ticksPerSecond);

     /* Init blockTable data */
     { z_const char* srcPtr = (z_const char*)srcBuffer;
@@ -209,17 +207,17 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
         size_t cSize = 0;
         double ratio = 0.;

-        UTIL_getTime(&coolTime);
+        coolTime = UTIL_getTime();
         DISPLAYLEVEL(2, "\r%79s\r", "");
         while (!cCompleted | !dCompleted) {
             UTIL_time_t clockStart;
             U64 clockLoop = g_nbIterations ? TIMELOOP_MICROSEC : 1;

             /* overheat protection */
-            if (UTIL_clockSpanMicro(coolTime, ticksPerSecond) > ACTIVEPERIOD_MICROSEC) {
+            if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
                 DISPLAYLEVEL(2, "\rcooling down ... \r");
                 UTIL_sleep(COOLPERIOD_SEC);
-                UTIL_getTime(&coolTime);
+                coolTime = UTIL_getTime();
             }

             /* Compression */
@@ -227,8 +225,8 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
             if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize);  /* warm up and erase result buffer */

             UTIL_sleepMilli(1);  /* give processor time to other processes */
-            UTIL_waitForNextTick(ticksPerSecond);
-            UTIL_getTime(&clockStart);
+            UTIL_waitForNextTick();
+            clockStart = UTIL_getTime();

             if (!cCompleted) {   /* still some time to do compression tests */
                 U32 nbLoops = 0;
@@ -256,7 +254,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].cSize = rSize;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                 ZSTD_freeCDict(cdict);
             } else if (compressor == BMK_ZSTD_STREAM) {
                 ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
@@ -285,7 +283,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].cSize = outBuffer.pos;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                 ZSTD_freeCStream(zbc);
             } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
                 z_stream def;
@@ -326,7 +324,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].cSize = def.total_out;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                 ret = deflateEnd(&def);
                 if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
             } else {
@@ -359,9 +357,9 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].cSize = def.total_out;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
             }
-            { U64 const clockSpan = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+            { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
                 if (clockSpan < fastestC*nbLoops) fastestC = clockSpan / nbLoops;
                 totalCTime += clockSpan;
                 cCompleted = totalCTime>maxTime;
@@ -381,8 +379,8 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
             if (!dCompleted) memset(resultBuffer, 0xD6, srcSize);  /* warm result buffer */

             UTIL_sleepMilli(1);  /* give processor time to other processes */
-            UTIL_waitForNextTick(ticksPerSecond);
-            UTIL_getTime(&clockStart);
+            UTIL_waitForNextTick();
+            clockStart = UTIL_getTime();

             if (!dCompleted) {
                 U32 nbLoops = 0;
@@ -405,7 +403,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].resSize = regenSize;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                 ZSTD_freeDDict(ddict);
             } else if (compressor == BMK_ZSTD_STREAM) {
                 ZSTD_inBuffer inBuffer;
@@ -431,7 +429,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].resSize = outBuffer.pos;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                 ZSTD_freeDStream(zbd);
             } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
                 z_stream inf;
@@ -467,7 +465,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].resSize = inf.total_out;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                 ret = inflateEnd(&inf);
                 if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
             } else {
@@ -501,9 +499,9 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                         blockTable[blockNb].resSize = inf.total_out;
                     }
                     nbLoops++;
-                } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+                } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
             }
-            { U64 const clockSpan = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+            { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
                 if (clockSpan < fastestD*nbLoops) fastestD = clockSpan / nbLoops;
                 totalDTime += clockSpan;
                 dCompleted = totalDTime>maxTime;