Merge pull request #1197 from facebook/poolResize

Thread Pool resize
Yann Collet 2018-06-22 14:20:07 -07:00 committed by GitHub
commit 3934e010a2
8 changed files with 369 additions and 110 deletions

lib/common/pool.c

@@ -10,9 +10,10 @@
/* ====== Dependencies ======= */
#include <stddef.h> /* size_t */
#include "pool.h"
#include <stddef.h> /* size_t */
#include "debug.h" /* assert */
#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */
#include "pool.h"
/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
@@ -33,8 +34,9 @@ typedef struct POOL_job_s {
struct POOL_ctx_s {
ZSTD_customMem customMem;
/* Keep track of the threads */
ZSTD_pthread_t *threads;
size_t numThreads;
ZSTD_pthread_t* threads;
size_t threadCapacity;
size_t threadLimit;
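/* threadCapacity : nb of threads allocated in ctx->threads ;
 * threadLimit : max nb of threads allowed to work at the same time.
 * threadLimit <= threadCapacity : shrinking via POOL_resize() only lowers
 * the limit, threads stay allocated for a possible future re-expansion. */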
/* The queue is a circular buffer */
POOL_job *queue;
@@ -58,10 +60,10 @@
};
/* POOL_thread() :
Work thread for the thread pool.
Waits for jobs and executes them.
@returns : NULL on failure else non-null.
*/
* Work thread for the thread pool.
* Waits for jobs and executes them.
* @returns : NULL on failure else non-null.
*/
static void* POOL_thread(void* opaque) {
POOL_ctx* const ctx = (POOL_ctx*)opaque;
if (!ctx) { return NULL; }
@@ -69,14 +71,17 @@ static void* POOL_thread(void* opaque) {
/* Lock the mutex and wait for a non-empty queue or until shutdown */
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
while (ctx->queueEmpty && !ctx->shutdown) {
while ( ctx->queueEmpty
|| (ctx->numThreadsBusy >= ctx->threadLimit) ) {
if (ctx->shutdown) {
/* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
* a few threads will shut down while the queue is not yet empty,
* but enough threads will remain active to finish the queue */
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
return opaque;
}
ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
}
/* empty => shutting down: so stop */
if (ctx->queueEmpty) {
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
return opaque;
}
/* Pop a job off the queue */
{ POOL_job const job = ctx->queue[ctx->queueHead];
ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
@@ -89,30 +94,32 @@ static void* POOL_thread(void* opaque) {
job.function(job.opaque);
/* If the intended queue size was 0, signal after finishing job */
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
ctx->numThreadsBusy--;
if (ctx->queueSize == 1) {
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
ctx->numThreadsBusy--;
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
ZSTD_pthread_cond_signal(&ctx->queuePushCond);
} }
}
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}
} /* for (;;) */
/* Unreachable */
assert(0); /* Unreachable */
}
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
ZSTD_customMem customMem) {
POOL_ctx* ctx;
/* Check the parameters */
/* Check parameters */
if (!numThreads) { return NULL; }
/* Allocate the context and zero initialize */
ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
if (!ctx) { return NULL; }
/* Initialize the job queue.
* It needs one extra space since one space is wasted to differentiate empty
* and full queues.
* It needs one extra space since one space is wasted to differentiate
* empty and full queues.
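* For example, an intended queueSize of 4 allocates 5 slots : the queue is
* then reported full when queueHead == (queueTail + 1) % queueSize, a state
* which could not be told apart from "empty" (queueHead == queueTail)
* without the spare slot.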
*/
ctx->queueSize = queueSize + 1;
ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
@@ -126,7 +133,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customM
ctx->shutdown = 0;
/* Allocate space for the thread handles */
ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
ctx->numThreads = 0;
ctx->threadCapacity = 0;
ctx->customMem = customMem;
/* Check for errors */
if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
@@ -134,11 +141,12 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customM
{ size_t i;
for (i = 0; i < numThreads; ++i) {
if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
ctx->numThreads = i;
ctx->threadCapacity = i;
POOL_free(ctx);
return NULL;
} }
ctx->numThreads = numThreads;
ctx->threadCapacity = numThreads;
ctx->threadLimit = numThreads;
}
return ctx;
}
@@ -156,8 +164,8 @@ static void POOL_join(POOL_ctx* ctx) {
ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
/* Join all of the threads */
{ size_t i;
for (i = 0; i < ctx->numThreads; ++i) {
ZSTD_pthread_join(ctx->threads[i], NULL);
for (i = 0; i < ctx->threadCapacity; ++i) {
ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */
} }
}
@@ -172,24 +180,68 @@ void POOL_free(POOL_ctx *ctx) {
ZSTD_free(ctx, ctx->customMem);
}
size_t POOL_sizeof(POOL_ctx *ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
return sizeof(*ctx)
+ ctx->queueSize * sizeof(POOL_job)
+ ctx->numThreads * sizeof(ZSTD_pthread_t);
+ ctx->threadCapacity * sizeof(ZSTD_pthread_t);
}
/* @return : 0 on success, 1 on error */
static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
{
if (numThreads <= ctx->threadCapacity) {
if (!numThreads) return 1;
ctx->threadLimit = numThreads;
return 0;
}
/* numThreads > threadCapacity */
{ ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
if (!threadPool) return 1;
/* replace existing thread pool */
memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
ZSTD_free(ctx->threads, ctx->customMem);
ctx->threads = threadPool;
/* Initialize additional threads */
{ size_t threadId;
for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
ctx->threadCapacity = threadId;
return 1;
} }
} }
/* successfully expanded */
ctx->threadCapacity = numThreads;
ctx->threadLimit = numThreads;
return 0;
}
/* @return : 0 on success, 1 on error */
int POOL_resize(POOL_ctx* ctx, size_t numThreads)
{
int result;
if (ctx==NULL) return 1;
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
result = POOL_resize_internal(ctx, numThreads);
ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
return result;
}
/**
* Returns 1 if the queue is full and 0 otherwise.
*
* If the queueSize is 1 (the pool was created with an intended queueSize of 0),
* then a queue is empty if there is a thread free and no job is waiting.
* When queueSize is 1 (pool was created with an intended queueSize of 0),
* then a queue is empty if there is a thread free _and_ no job is waiting.
*/
static int isQueueFull(POOL_ctx const* ctx) {
if (ctx->queueSize > 1) {
return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
} else {
return ctx->numThreadsBusy == ctx->numThreads ||
return (ctx->numThreadsBusy == ctx->threadLimit) ||
!ctx->queueEmpty;
}
}
@@ -263,6 +315,11 @@ void POOL_free(POOL_ctx* ctx) {
(void)ctx;
}
int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
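/* no-op : this is the fallback path for builds without multithreading,
 * where the "pool" runs jobs inline, so any resize trivially succeeds */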
(void)ctx; (void)numThreads;
return 0;
}
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
(void)ctx;
function(opaque);

lib/common/pool.h

@@ -30,40 +30,50 @@ typedef struct POOL_ctx_s POOL_ctx;
*/
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
ZSTD_customMem customMem);
/*! POOL_free() :
Free a thread pool returned by POOL_create().
*/
* Free a thread pool returned by POOL_create().
*/
void POOL_free(POOL_ctx* ctx);
/*! POOL_resize() :
* Expands or shrinks pool's number of threads.
* This is more efficient than releasing + creating a new context,
* since it tries to preserve and re-use existing threads.
* `numThreads` must be at least 1.
* @return : 0 when resize was successful,
* !0 (typically 1) if there is an error.
* note : only numThreads can be resized, queueSize remains unchanged.
*/
int POOL_resize(POOL_ctx* ctx, size_t numThreads);
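/* Illustrative sketch (hypothetical values ; error handling elided) :
 *     POOL_ctx* const pool = POOL_create(2, 8);   // 2 threads, queue of 8 jobs
 *     POOL_resize(pool, 4);   // grow : creates 2 additional threads
 *     POOL_resize(pool, 1);   // shrink : threads stay allocated, only the active limit drops
 *     POOL_resize(pool, 4);   // grow again : no new thread needed, capacity is already 4
 *     POOL_free(pool);
 */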
/*! POOL_sizeof() :
return memory usage of pool returned by POOL_create().
*/
* @return threadpool memory usage
* note : compatible with NULL (returns 0 in this case)
*/
size_t POOL_sizeof(POOL_ctx* ctx);
/*! POOL_function :
The function type that can be added to a thread pool.
*/
* The function type that can be added to a thread pool.
*/
typedef void (*POOL_function)(void*);
/*! POOL_add_function :
The function type for a generic thread pool add function.
*/
typedef void (*POOL_add_function)(void*, POOL_function, void*);
/*! POOL_add() :
Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
Possibly blocks until there is room in the queue.
Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
*/
* Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
* Possibly blocks until there is room in the queue.
* Note : The function may be executed asynchronously,
* therefore, `opaque` must live until function has been completed.
*/
void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
/*! POOL_tryAdd() :
Add the job `function(opaque)` to the thread pool if a worker is available.
return immediately otherwise.
@return : 1 if successful, 0 if not.
*/
* Add the job `function(opaque)` to thread pool _if_ a worker is available.
* Returns immediately even if not (does not block).
* @return : 1 if successful, 0 if not.
*/
int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
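/* Illustrative sketch (hypothetical job `incJob` ; a real job must synchronize shared state) :
 *     static void incJob(void* opaque) { *(int*)opaque += 1; }
 *     ...
 *     if (!POOL_tryAdd(ctx, &incJob, &counter))   // runs only if a worker is available right now
 *         POOL_add(ctx, &incJob, &counter);       // otherwise, block until the queue has room
 * `counter` must outlive the job, since it may execute asynchronously. */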

lib/compress/zstd_compress.c

@@ -3647,13 +3647,9 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
}
if (params.nbWorkers > 0) {
/* mt context creation */
if (cctx->mtctx == NULL || (params.nbWorkers != ZSTDMT_getNbWorkers(cctx->mtctx))) {
if (cctx->mtctx == NULL) {
DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
params.nbWorkers);
if (cctx->mtctx != NULL)
DEBUGLOG(4, "ZSTD_compress_generic: previous nbWorkers was %u",
ZSTDMT_getNbWorkers(cctx->mtctx));
ZSTDMT_freeCCtx(cctx->mtctx);
cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
if (cctx->mtctx == NULL) return ERROR(memory_allocation);
}

lib/compress/zstdmt_compress.c

@@ -159,6 +159,25 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const
ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
}
static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
{
unsigned const maxNbBuffers = 2*nbWorkers + 3;
if (srcBufPool==NULL) return NULL;
if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
return srcBufPool;
/* need a larger buffer pool */
{ ZSTD_customMem const cMem = srcBufPool->cMem;
size_t const bSize = srcBufPool->bufferSize; /* forward parameters */
ZSTDMT_bufferPool* newBufPool;
ZSTDMT_freeBufferPool(srcBufPool);
newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
if (newBufPool==NULL) return newBufPool;
ZSTDMT_setBufferSize(newBufPool, bSize);
return newBufPool;
}
}
/** ZSTDMT_getBuffer() :
* assumption : bufPool must be valid
* @return : a buffer, with start pointer and size
@@ -309,6 +328,10 @@ static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
ZSTDMT_freeBufferPool(seqPool);
}
static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
{
return ZSTDMT_expandBufferPool(pool, nbWorkers);
}
/* ===== CCtx Pool ===== */
@@ -354,6 +377,18 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbWorkers,
return cctxPool;
}
static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
unsigned nbWorkers)
{
if (srcPool==NULL) return NULL;
if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */
/* need a larger cctx pool */
{ ZSTD_customMem const cMem = srcPool->cMem;
ZSTDMT_freeCCtxPool(srcPool);
return ZSTDMT_createCCtxPool(nbWorkers, cMem);
}
}
/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
@@ -744,9 +779,9 @@ struct ZSTDMT_CCtx_s {
ZSTD_CCtx_params params;
size_t targetSectionSize;
size_t targetPrefixSize;
roundBuff_t roundBuff;
int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
inBuff_t inBuff;
int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create another one. */
roundBuff_t roundBuff;
serialState_t serial;
unsigned singleBlockingThread;
unsigned jobIDMask;
@@ -797,6 +832,20 @@ static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_custom
return jobTable;
}
static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
U32 nbJobs = nbWorkers + 2;
if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */
ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
mtctx->jobIDMask = 0;
mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
if (mtctx->jobs==NULL) return ERROR(memory_allocation);
assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */
mtctx->jobIDMask = nbJobs - 1;
}
return 0;
}
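/* note : nbJobs is kept a power of 2, so that a job slot can be selected with
 * a simple mask, e.g. mtctx->jobs[jobID & mtctx->jobIDMask], wrapping around
 * automatically instead of requiring a modulo. */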
/* ZSTDMT_CCtxParam_setNbWorkers():
* Internal use only */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
@@ -963,6 +1012,24 @@ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
return jobParams;
}
/* ZSTDMT_resize() :
* @return : error code if fails, 0 on success */
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
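/* note : each expand call below may free the previous pool and allocate a
 * larger one, hence the result is reassigned and NULL-checked every time */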
mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
return 0;
}
/*! ZSTDMT_updateCParams_whileCompressing() :
* Updates only a selected set of compression parameters, to remain compatible with current frame.
* New parameters will be applied to next compression job. */
@@ -979,15 +1046,6 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p
}
}
/* ZSTDMT_getNbWorkers():
* @return nb threads currently active in mtctx.
* mtctx must be valid */
unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx)
{
assert(mtctx != NULL);
return mtctx->params.nbWorkers;
}
/* ZSTDMT_getFrameProgression():
* tells how much data has been consumed (input) and produced (output) for current frame.
* able to count progression inside worker threads.
@@ -1088,15 +1146,7 @@ static size_t ZSTDMT_compress_advanced_internal(
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
return ERROR(memory_allocation);
if (nbJobs > mtctx->jobIDMask+1) { /* enlarge job table */
U32 jobsTableSize = nbJobs;
ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
mtctx->jobIDMask = 0;
mtctx->jobs = ZSTDMT_createJobsTable(&jobsTableSize, mtctx->cMem);
if (mtctx->jobs==NULL) return ERROR(memory_allocation);
assert((jobsTableSize != 0) && ((jobsTableSize & (jobsTableSize - 1)) == 0)); /* ensure jobsTableSize is a power of 2 */
mtctx->jobIDMask = jobsTableSize - 1;
}
CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
{ unsigned u;
for (u=0; u<nbJobs; u++) {
@@ -1221,12 +1271,15 @@ size_t ZSTDMT_initCStream_internal(
{
DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
(U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
/* params are supposed to be fully validated at this point */
/* params are supposed to be at least partially validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
/* init */
if (params.nbWorkers != mtctx->params.nbWorkers)
CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );
if (params.jobSize == 0) {
params.jobSize = 1U << ZSTDMT_computeTargetJobLog(params);
}

lib/compress/zstdmt_compress.h

@@ -126,11 +126,6 @@ size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorker
* New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
/* ZSTDMT_getNbWorkers():
* @return nb threads currently active in mtctx.
* mtctx must be valid */
unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx);
/* ZSTDMT_getFrameProgression():
* tells how much data has been consumed (input) and produced (output) for current frame.
* able to count progression inside worker threads.

lib/decompress/zstd_decompress.c

@@ -1108,15 +1108,11 @@ size_t ZSTD_execSequence(BYTE* op,
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
DEBUGLOG(2, "ZSTD_execSequence: found a 2-segments match")
{ size_t const length1 = dictEnd - match;
DEBUGLOG(2, "first part (extDict) is %zu bytes long", length1);
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
DEBUGLOG(2, "second part (prefix) is %zu bytes long", sequence.matchLength);
match = prefixStart;
DEBUGLOG(2, "first byte of 2nd part : %02X", *prefixStart);
if (op > oend_w || sequence.matchLength < MINMATCH) {
U32 i;
for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];

tests/poolTests.c

@@ -15,11 +15,11 @@
#include <stddef.h>
#include <stdio.h>
#define ASSERT_TRUE(p) \
do { \
if (!(p)) { \
return 1; \
} \
#define ASSERT_TRUE(p) \
do { \
if (!(p)) { \
return 1; \
} \
} while (0)
#define ASSERT_FALSE(p) ASSERT_TRUE(!(p))
#define ASSERT_EQ(lhs, rhs) ASSERT_TRUE((lhs) == (rhs))
@@ -32,10 +32,10 @@ struct data {
void fn(void *opaque) {
struct data *data = (struct data *)opaque;
pthread_mutex_lock(&data->mutex);
ZSTD_pthread_mutex_lock(&data->mutex);
data->data[data->i] = data->i;
++data->i;
pthread_mutex_unlock(&data->mutex);
ZSTD_pthread_mutex_unlock(&data->mutex);
}
int testOrder(size_t numThreads, size_t queueSize) {
@@ -43,25 +43,26 @@ int testOrder(size_t numThreads, size_t queueSize) {
POOL_ctx *ctx = POOL_create(numThreads, queueSize);
ASSERT_TRUE(ctx);
data.i = 0;
pthread_mutex_init(&data.mutex, NULL);
{
size_t i;
ZSTD_pthread_mutex_init(&data.mutex, NULL);
{ size_t i;
for (i = 0; i < 16; ++i) {
POOL_add(ctx, &fn, &data);
}
}
POOL_free(ctx);
ASSERT_EQ(16, data.i);
{
size_t i;
{ size_t i;
for (i = 0; i < data.i; ++i) {
ASSERT_EQ(i, data.data[i]);
}
}
pthread_mutex_destroy(&data.mutex);
ZSTD_pthread_mutex_destroy(&data.mutex);
return 0;
}
/* --- test deadlocks --- */
void waitFn(void *opaque) {
(void)opaque;
UTIL_sleepMilli(1);
@@ -72,8 +73,7 @@ int testWait(size_t numThreads, size_t queueSize) {
struct data data;
POOL_ctx *ctx = POOL_create(numThreads, queueSize);
ASSERT_TRUE(ctx);
{
size_t i;
{ size_t i;
for (i = 0; i < 16; ++i) {
POOL_add(ctx, &waitFn, &data);
}
@@ -82,25 +82,177 @@ int testWait(size_t numThreads, size_t queueSize) {
return 0;
}
/* --- test POOL_resize() --- */
typedef struct {
ZSTD_pthread_mutex_t mut;
int val;
int max;
ZSTD_pthread_cond_t cond;
} poolTest_t;
void waitLongFn(void *opaque) {
poolTest_t* test = (poolTest_t*) opaque;
UTIL_sleepMilli(10);
ZSTD_pthread_mutex_lock(&test->mut);
test->val = test->val + 1;
if (test->val == test->max)
ZSTD_pthread_cond_signal(&test->cond);
ZSTD_pthread_mutex_unlock(&test->mut);
}
static int testThreadReduction_internal(POOL_ctx* ctx, poolTest_t test)
{
int const nbWaits = 16;
UTIL_time_t startTime;
U64 time4threads, time2threads;   /* UTIL_clockSpanNano() returns U64 */
test.val = 0;
test.max = nbWaits;
startTime = UTIL_getTime();
{ int i;
for (i=0; i<nbWaits; i++)
POOL_add(ctx, &waitLongFn, &test);
}
ZSTD_pthread_mutex_lock(&test.mut);
while (test.val < nbWaits)   /* predicate loop : protects against spurious wakeups, and against the signal firing before the wait starts */
ZSTD_pthread_cond_wait(&test.cond, &test.mut);
ASSERT_EQ(test.val, nbWaits);
ZSTD_pthread_mutex_unlock(&test.mut);
time4threads = UTIL_clockSpanNano(startTime);
ASSERT_EQ( POOL_resize(ctx, 2/*nbThreads*/) , 0 );
test.val = 0;
startTime = UTIL_getTime();
{ int i;
for (i=0; i<nbWaits; i++)
POOL_add(ctx, &waitLongFn, &test);
}
ZSTD_pthread_mutex_lock(&test.mut);
while (test.val < nbWaits)   /* predicate loop, as above */
ZSTD_pthread_cond_wait(&test.cond, &test.mut);
ASSERT_EQ(test.val, nbWaits);
ZSTD_pthread_mutex_unlock(&test.mut);
time2threads = UTIL_clockSpanNano(startTime);
if (time4threads >= time2threads) return 1; /* check 4 threads were effectively faster than 2 */
return 0;
}
static int testThreadReduction(void) {
int result;
poolTest_t test;
POOL_ctx* const ctx = POOL_create(4 /*nbThreads*/, 2 /*queueSize*/);
ASSERT_TRUE(ctx);
memset(&test, 0, sizeof(test));
ASSERT_FALSE( ZSTD_pthread_mutex_init(&test.mut, NULL) );
ASSERT_FALSE( ZSTD_pthread_cond_init(&test.cond, NULL) );
result = testThreadReduction_internal(ctx, test);
ZSTD_pthread_mutex_destroy(&test.mut);
ZSTD_pthread_cond_destroy(&test.cond);
POOL_free(ctx);
return result;
}
/* --- test abrupt ending --- */
typedef struct {
ZSTD_pthread_mutex_t mut;
int val;
} abruptEndCanary_t;
void waitIncFn(void *opaque) {
abruptEndCanary_t* test = (abruptEndCanary_t*) opaque;
UTIL_sleepMilli(10);
ZSTD_pthread_mutex_lock(&test->mut);
test->val = test->val + 1;
ZSTD_pthread_mutex_unlock(&test->mut);
}
static int testAbruptEnding_internal(abruptEndCanary_t test)
{
int const nbWaits = 16;
POOL_ctx* const ctx = POOL_create(3 /*numThreads*/, nbWaits /*queueSize*/);
ASSERT_TRUE(ctx);
test.val = 0;
{ int i;
for (i=0; i<nbWaits; i++)
POOL_add(ctx, &waitIncFn, &test); /* all jobs pushed into queue */
}
ASSERT_EQ( POOL_resize(ctx, 1 /*numThreads*/) , 0 ); /* downsize numThreads, to try to break end condition */
POOL_free(ctx); /* must finish all jobs in queue before giving back control */
ASSERT_EQ(test.val, nbWaits);
return 0;
}
static int testAbruptEnding(void) {
int result;
abruptEndCanary_t test;
memset(&test, 0, sizeof(test));
ASSERT_FALSE( ZSTD_pthread_mutex_init(&test.mut, NULL) );
result = testAbruptEnding_internal(test);
ZSTD_pthread_mutex_destroy(&test.mut);
return result;
}
/* --- test launcher --- */
int main(int argc, const char **argv) {
size_t numThreads;
(void)argc;
(void)argv;
if (POOL_create(0, 1)) { /* should not be possible */
printf("FAIL: should not create POOL with 0 threads\n");
return 1;
}
for (numThreads = 1; numThreads <= 4; ++numThreads) {
size_t queueSize;
for (queueSize = 0; queueSize <= 2; ++queueSize) {
printf("queueSize==%u, numThreads=%u \n",
(unsigned)queueSize, (unsigned)numThreads);
if (testOrder(numThreads, queueSize)) {
printf("FAIL: testOrder\n");
return 1;
}
printf("SUCCESS: testOrder\n");
if (testWait(numThreads, queueSize)) {
printf("FAIL: testWait\n");
return 1;
}
printf("SUCCESS: testWait\n");
}
}
printf("PASS: testOrder\n");
(void)argc;
(void)argv;
return (POOL_create(0, 1)) ? printf("FAIL: testInvalid\n"), 1
: printf("PASS: testInvalid\n"), 0;
if (testThreadReduction()) {
printf("FAIL: thread reduction not effective \n");
return 1;
} else {
printf("SUCCESS: thread reduction effective (slower execution) \n");
}
if (testAbruptEnding()) {
printf("FAIL: jobs in queue not completed on early end \n");
return 1;
} else {
printf("SUCCESS: all jobs in queue completed on early end \n");
}
printf("PASS: all POOL tests\n");
return 0;
}

tests/zstreamtest.c

@@ -1647,13 +1647,13 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest,
DISPLAYLEVEL(5, "Creating new context \n");
ZSTD_freeCCtx(zc);
zc = ZSTD_createCCtx();
CHECK(zc==NULL, "ZSTD_createCCtx allocation error");
resetAllowed=0;
CHECK(zc == NULL, "ZSTD_createCCtx allocation error");
resetAllowed = 0;
}
if ((FUZ_rand(&lseed) & 0xFF) == 132) {
ZSTD_freeDStream(zd);
zd = ZSTD_createDStream();
CHECK(zd==NULL, "ZSTD_createDStream allocation error");
CHECK(zd == NULL, "ZSTD_createDStream allocation error");
ZSTD_initDStream_usingDict(zd, NULL, 0); /* ensure at least one init */
}