Move Workspace Functions to Their Own File

W. Felix Handte 2019-08-20 14:02:50 -04:00
parent 077a2d7dc9
commit b511a84adc
4 changed files with 379 additions and 294 deletions

lib/compress/zstd_compress.c

@@ -38,209 +38,6 @@ size_t ZSTD_compressBound(size_t srcSize) {
}
/*-*************************************
* Workspace memory management
***************************************/
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when the workspace has been too large
* for at least this many consecutive uses,
* the context's memory usage is considered wasteful,
* because it is sized for a worst-case scenario that rarely occurs.
* In that case, resize it down to free some memory */
/**
* Align must be a power of 2.
*/
static size_t ZSTD_cwksp_align(size_t size, size_t const align) {
size_t const mask = align - 1;
assert((align & mask) == 0);
return (size + mask) & ~mask;
}
/**
* Internal function, use wrappers instead.
*/
static void* ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
/* TODO(felixh): alignment */
void* alloc = (BYTE *)ws->allocStart - bytes;
void* bottom = ws->tableEnd;
DEBUGLOG(3, "wksp: reserving align %zd bytes, %zd bytes remaining",
bytes, (BYTE *)alloc - (BYTE *)bottom);
assert(phase >= ws->phase);
if (phase > ws->phase) {
if (ws->phase < ZSTD_cwksp_alloc_buffers &&
phase >= ZSTD_cwksp_alloc_buffers) {
}
if (ws->phase < ZSTD_cwksp_alloc_aligned &&
phase >= ZSTD_cwksp_alloc_aligned) {
/* If unaligned allocations down from a too-large top have left us
* unaligned, we need to realign our alloc ptr. Technically, this
* can consume space that is unaccounted for in the neededSpace
* calculation. However, I believe this can only happen when the
* workspace is too large, and specifically when it is too large
* by a larger margin than the space that will be consumed. */
/* TODO: cleaner, compiler warning friendly way to do this??? */
alloc = (BYTE*)alloc - ((size_t)alloc & (sizeof(U32)-1));
}
ws->phase = phase;
}
assert(alloc >= bottom);
if (alloc < bottom) {
ws->allocFailed = 1;
return NULL;
}
ws->allocStart = alloc;
return alloc;
}
/**
* Unaligned.
*/
static BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}
/**
* Aligned on sizeof(unsigned).
*/
static void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
assert((bytes & (sizeof(U32)-1)) == 0); // TODO ???
return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}
/**
* Aligned on sizeof(unsigned). These buffers have the special property that
* their values remain constrained, allowing us to re-use them without
* memset()-ing them.
*/
static void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
/* TODO(felixh): alignment */
const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
void* alloc = ws->tableEnd;
void* end = (BYTE *)alloc + bytes;
void* top = ws->allocStart;
DEBUGLOG(3, "wksp: reserving table %zd bytes, %zd bytes remaining",
bytes, (BYTE *)top - (BYTE *)end);
assert((bytes & (sizeof(U32)-1)) == 0); // TODO ???
assert(phase >= ws->phase);
if (phase > ws->phase) {
if (ws->phase <= ZSTD_cwksp_alloc_buffers) {
}
ws->phase = phase;
}
assert(end <= top);
if (end > top) {
ws->allocFailed = 1;
return NULL;
}
ws->tableEnd = end;
return alloc;
}
/**
* Aligned on sizeof(void*).
*/
static void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
void* start = ws->objectEnd;
void* end = (BYTE*)start + roundedBytes;
DEBUGLOG(3, "wksp: reserving %zd bytes object (rounded to %zd), %zd bytes remaining", bytes, roundedBytes, (BYTE *)ws->workspaceEnd - (BYTE *)end);
assert(((size_t)start & (sizeof(void*)-1)) == 0);
assert((bytes & (sizeof(void*)-1)) == 0);
if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
DEBUGLOG(3, "wksp: object alloc failed!");
ws->allocFailed = 1;
return NULL;
}
ws->objectEnd = end;
ws->tableEnd = end;
return start;
}
// TODO
static int ZSTD_cwksp_bump_oversized_duration(ZSTD_cwksp* ws) {
(void)ws;
// if (((BYTE*)ws->allocEnd - (BYTE*)ws->workspace) * ZSTD_WORKSPACETOOLARGE_FACTOR < (BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace) {
// ws->workspaceOversizedDuration++;
// } else {
// ws->workspaceOversizedDuration = 0;
// }
// return ws->workspaceOversizedDuration;
return 0;
}
/**
* Invalidates table allocations.
* All other allocations remain valid.
*/
static void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
ws->tableEnd = ws->objectEnd;
}
/**
* Invalidates all buffer, aligned, and table allocations.
* Object allocations remain valid.
*/
static void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
DEBUGLOG(3, "wksp: clearing!");
ZSTD_cwksp_bump_oversized_duration(ws);
ws->tableEnd = ws->objectEnd;
ws->allocStart = ws->workspaceEnd;
ws->allocFailed = 0;
if (ws->phase > ZSTD_cwksp_alloc_buffers) {
ws->phase = ZSTD_cwksp_alloc_buffers;
}
}
static void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
DEBUGLOG(3, "wksp: init'ing with %zd bytes", size);
assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
ws->workspace = start;
ws->workspaceEnd = (BYTE*)start + size;
ws->objectEnd = ws->workspace;
ws->phase = ZSTD_cwksp_alloc_objects;
ZSTD_cwksp_clear(ws);
ws->workspaceOversizedDuration = 0;
}
static size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
void* workspace = ZSTD_malloc(size, customMem);
DEBUGLOG(3, "wksp: creating with %zd bytes", size);
RETURN_ERROR_IF(workspace == NULL, memory_allocation);
ZSTD_cwksp_init(ws, workspace, size);
return 0;
}
static void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
DEBUGLOG(3, "wksp: freeing");
ZSTD_free(ws->workspace, customMem);
ws->workspace = NULL;
ws->workspaceEnd = NULL;
ZSTD_cwksp_clear(ws);
}
static size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}
static int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t minFree) {
return ZSTD_cwksp_available_space(ws) >= minFree;
}
static int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t minFree) {
return ZSTD_cwksp_check_available(ws, minFree * ZSTD_WORKSPACETOOLARGE_FACTOR) && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}
static size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
return (BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace;
}
static int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
return ws->allocFailed;
}
/*-*************************************
* Context memory management
***************************************/
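To make the pointer arithmetic concrete: ZSTD_cwksp_align() rounds a size up to a power-of-two boundary, and the reserve helpers walk two pointers toward each other, tables growing up from the bottom of the workspace while buffers and aligned allocations grow down from the top. Below is a minimal standalone sketch of that scheme (not part of the commit); align_up and the demo workspace are hypothetical stand-ins for the real ZSTD_cwksp machinery.

    /* Illustrative sketch only; simplified from the helpers above. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Round size up to a multiple of align (align must be a power of 2),
     * mirroring ZSTD_cwksp_align(). */
    static size_t align_up(size_t size, size_t align)
    {
        size_t const mask = align - 1;
        assert((align & mask) == 0);
        return (size + mask) & ~mask;
    }

    int main(void)
    {
        static char workspace[64];
        char* tableEnd   = workspace;                      /* tables grow up    */
        char* allocStart = workspace + sizeof(workspace);  /* buffers grow down */

        /* A "table" reservation bumps the bottom pointer up... */
        tableEnd += align_up(20, sizeof(unsigned));        /* 20 is already a multiple of 4 */

        /* ...while a "buffer" reservation pulls the top pointer down. */
        allocStart -= 10;

        /* The two regions must never cross; the real code records allocFailed
         * and returns NULL instead of asserting. */
        assert(tableEnd <= allocStart);
        printf("free space left: %zu bytes\n", (size_t)(allocStart - tableEnd));
        return 0;
    }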

lib/compress/zstd_compress_internal.h

@@ -19,6 +19,7 @@
* Dependencies
***************************************/
#include "zstd_internal.h"
#include "zstd_cwksp.h"
#ifdef ZSTD_MULTITHREAD
# include "zstdmt_compress.h"
#endif
@@ -223,97 +224,6 @@ struct ZSTD_CCtx_params_s {
ZSTD_customMem customMem;
}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
typedef enum {
ZSTD_cwksp_alloc_objects,
ZSTD_cwksp_alloc_buffers,
ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;
/**
* Zstd fits all its internal datastructures into a single contiguous buffer,
* so that it only needs to perform a single OS allocation (or so that a buffer
* can be provided to it and it can perform no allocations at all). This buffer
* is called the workspace.
*
* Several optimizations complicate that process of allocating memory ranges
* from this workspace for each datastructure:
*
* - These different internal datastructures have different setup requirements.
* Some (e.g., the window buffer) don't care, and are happy to accept
* uninitialized memory. Others (e.g., the matchstate tables) can accept
* memory filled with unknown but bounded values (i.e., a memory area whose
* values are known to be constrained between 0 and some upper bound). If
* that constraint isn't known to be satisfied, the area has to be cleared.
*
* - We would like to reuse the objects in the workspace for multiple
* compressions without having to perform any expensive reallocation or
* reinitialization work.
*
* - We would like to be able to efficiently reuse the workspace across
* multiple compressions **even when the compression parameters change** and
* we need to resize some of the objects (where possible).
*
* Workspace Layout:
*
* [ ... workspace ... ]
* [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
*
* In order to accomplish this, the various objects that live in the workspace
* are divided into the following categories:
*
* - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
* so that literally everything fits in a single buffer. Note: if present,
* this must be the first object in the workspace, since ZSTD_free{CCtx,
* CDict}() rely on a pointer comparison to see whether one or two frees are
* required.
*
* - Fixed size objects: these are fixed-size, fixed-count objects that are
* nonetheless "dynamically" allocated in the workspace so that we can
* control how they're initialized separately from the broader ZSTD_CCtx.
* Examples:
* - Entropy Workspace
* - 2 x ZSTD_compressedBlockState_t
* - CDict dictionary contents
*
* - Tables: these are any of several different datastructures (hash tables,
* chain tables, binary trees) that all respect a common format: they are
* uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
* Their sizes depend on the cparams.
*
* - Aligned: these buffers are used for various purposes that don't require
* any initialization before they're used.
*
* - Uninitialized memory: these buffers are used for various purposes that
* don't require any initialization before they're used. This means they can
* be moved around at no cost for a new compression.
*
* Allocating Memory:
*
* The various types of objects must be allocated in order, so they can be
* correctly packed into the workspace buffer. That order is:
*
* 1. Objects
* 2. Buffers
* 3. Aligned
* 4. Tables
*
* Reusing Table Space:
*
* TODO(felixh): ...
*/
typedef struct {
void* workspace;
void* workspaceEnd;
void* objectEnd;
void* tableEnd;
void* allocStart;
int allocFailed;
int workspaceOversizedDuration;
ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;
struct ZSTD_CCtx_s {
ZSTD_compressionStage_e stage;
int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
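The layout comment above spells out the allocation order (objects, then buffers, then aligned allocations, then tables), which is enforced by the phase enum: a reservation may only move the phase forward. The sketch below is not part of the commit; it shows that call order against the API moved into zstd_cwksp.h, assumes it is compiled inside lib/compress where zstd_internal.h supplies ZSTD_defaultCMem, ZSTD_isError, and ERROR(), and all sizes are made up for illustration.

    #include "zstd_cwksp.h"

    static size_t demo_cwksp_phases(void)
    {
        ZSTD_cwksp ws;
        size_t const err = ZSTD_cwksp_create(&ws, 1 << 16, ZSTD_defaultCMem);
        if (ZSTD_isError(err)) return err;

        /* Phases may only advance: reserving a buffer after an object is fine,
         * but trying to reserve another object afterwards would fail. */
        {   void* const obj     = ZSTD_cwksp_reserve_object(&ws, 8 * sizeof(void*));
            BYTE* const buf     = ZSTD_cwksp_reserve_buffer(&ws, 1000);
            void* const aligned = ZSTD_cwksp_reserve_aligned(&ws, 256);
            void* const table   = ZSTD_cwksp_reserve_table(&ws, 1024);
            (void)obj; (void)buf; (void)aligned; (void)table;
        }
        if (ZSTD_cwksp_reserve_failed(&ws)) return ERROR(memory_allocation);

        /* For the next compression, everything except the objects can be
         * invalidated and re-reserved without touching the OS allocator. */
        ZSTD_cwksp_clear(&ws);

        ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
        return 0;
    }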

lib/compress/zstd_cwksp.c (new file, 203 lines)

@@ -0,0 +1,203 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_cwksp.h"
/**
* Align must be a power of 2.
*/
size_t ZSTD_cwksp_align(size_t size, size_t const align) {
size_t const mask = align - 1;
assert((align & mask) == 0);
return (size + mask) & ~mask;
}
/**
* Internal function, use wrappers instead.
*/
void* ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
/* TODO(felixh): alignment */
void* alloc = (BYTE *)ws->allocStart - bytes;
void* bottom = ws->tableEnd;
DEBUGLOG(3, "wksp: reserving align %zd bytes, %zd bytes remaining",
bytes, (BYTE *)alloc - (BYTE *)bottom);
assert(phase >= ws->phase);
if (phase > ws->phase) {
if (ws->phase < ZSTD_cwksp_alloc_buffers &&
phase >= ZSTD_cwksp_alloc_buffers) {
}
if (ws->phase < ZSTD_cwksp_alloc_aligned &&
phase >= ZSTD_cwksp_alloc_aligned) {
/* If unaligned allocations down from a too-large top have left us
* unaligned, we need to realign our alloc ptr. Technically, this
* can consume space that is unaccounted for in the neededSpace
* calculation. However, I believe this can only happen when the
* workspace is too large, and specifically when it is too large
* by a larger margin than the space that will be consumed. */
/* TODO: cleaner, compiler warning friendly way to do this??? */
alloc = (BYTE*)alloc - ((size_t)alloc & (sizeof(U32)-1));
}
ws->phase = phase;
}
assert(alloc >= bottom);
if (alloc < bottom) {
ws->allocFailed = 1;
return NULL;
}
ws->allocStart = alloc;
return alloc;
}
/**
* Unaligned.
*/
BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}
/**
* Aligned on sizeof(unsigned).
*/
void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
assert((bytes & (sizeof(U32)-1)) == 0); // TODO ???
return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}
/**
* Aligned on sizeof(unsigned). These buffers have the special property that
* their values remain constrained, allowing us to re-use them without
* memset()-ing them.
*/
void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
/* TODO(felixh): alignment */
const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
void* alloc = ws->tableEnd;
void* end = (BYTE *)alloc + bytes;
void* top = ws->allocStart;
DEBUGLOG(3, "wksp: reserving table %zd bytes, %zd bytes remaining",
bytes, (BYTE *)top - (BYTE *)end);
assert((bytes & (sizeof(U32)-1)) == 0); // TODO ???
assert(phase >= ws->phase);
if (phase > ws->phase) {
if (ws->phase <= ZSTD_cwksp_alloc_buffers) {
}
ws->phase = phase;
}
assert(end <= top);
if (end > top) {
ws->allocFailed = 1;
return NULL;
}
ws->tableEnd = end;
return alloc;
}
/**
* Aligned on sizeof(void*).
*/
void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
void* start = ws->objectEnd;
void* end = (BYTE*)start + roundedBytes;
DEBUGLOG(3, "wksp: reserving %zd bytes object (rounded to %zd), %zd bytes remaining", bytes, roundedBytes, (BYTE *)ws->workspaceEnd - (BYTE *)end);
assert(((size_t)start & (sizeof(void*)-1)) == 0);
assert((bytes & (sizeof(void*)-1)) == 0);
if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
DEBUGLOG(3, "wksp: object alloc failed!");
ws->allocFailed = 1;
return NULL;
}
ws->objectEnd = end;
ws->tableEnd = end;
return start;
}
// TODO
int ZSTD_cwksp_bump_oversized_duration(ZSTD_cwksp* ws) {
(void)ws;
// if (((BYTE*)ws->allocEnd - (BYTE*)ws->workspace) * ZSTD_WORKSPACETOOLARGE_FACTOR < (BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace) {
// ws->workspaceOversizedDuration++;
// } else {
// ws->workspaceOversizedDuration = 0;
// }
// return ws->workspaceOversizedDuration;
return 0;
}
/**
* Invalidates table allocations.
* All other allocations remain valid.
*/
void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
ws->tableEnd = ws->objectEnd;
}
/**
* Invalidates all buffer, aligned, and table allocations.
* Object allocations remain valid.
*/
void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
DEBUGLOG(3, "wksp: clearing!");
ZSTD_cwksp_bump_oversized_duration(ws);
ws->tableEnd = ws->objectEnd;
ws->allocStart = ws->workspaceEnd;
ws->allocFailed = 0;
if (ws->phase > ZSTD_cwksp_alloc_buffers) {
ws->phase = ZSTD_cwksp_alloc_buffers;
}
}
void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
DEBUGLOG(3, "wksp: init'ing with %zd bytes", size);
assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
ws->workspace = start;
ws->workspaceEnd = (BYTE*)start + size;
ws->objectEnd = ws->workspace;
ws->phase = ZSTD_cwksp_alloc_objects;
ZSTD_cwksp_clear(ws);
ws->workspaceOversizedDuration = 0;
}
size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
void* workspace = ZSTD_malloc(size, customMem);
DEBUGLOG(3, "wksp: creating with %zd bytes", size);
RETURN_ERROR_IF(workspace == NULL, memory_allocation);
ZSTD_cwksp_init(ws, workspace, size);
return 0;
}
void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
DEBUGLOG(3, "wksp: freeing");
ZSTD_free(ws->workspace, customMem);
ws->workspace = NULL;
ws->workspaceEnd = NULL;
ZSTD_cwksp_clear(ws);
}
size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}
int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t minFree) {
return ZSTD_cwksp_available_space(ws) >= minFree;
}
int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t minFree) {
return ZSTD_cwksp_check_available(ws, minFree * ZSTD_WORKSPACETOOLARGE_FACTOR) && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}
size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
return (BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace;
}
int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
return ws->allocFailed;
}
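The sizing checks above are meant to let a caller (for example, the CCtx reset path) decide whether to keep the current workspace or to free it and allocate a new one. A sketch of that decision follows (not from the commit); demo_workspace_decision and neededSpace are hypothetical names.

    #include "zstd_cwksp.h"

    static int demo_workspace_decision(ZSTD_cwksp* ws, size_t neededSpace)
    {
        /* The stub above will eventually track consecutive oversized uses. */
        ZSTD_cwksp_bump_oversized_duration(ws);

        {   /* Too small: not enough free space for the next compression. */
            int const tooSmall = !ZSTD_cwksp_check_available(ws, neededSpace);
            /* Wasteful: at least ZSTD_WORKSPACETOOLARGE_FACTOR times more free
             * space than needed, for more than
             * ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses. */
            int const wasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
            return tooSmall || wasteful;   /* nonzero => free and recreate */
        }
    }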

lib/compress/zstd_cwksp.h (new file, 175 lines)

@@ -0,0 +1,175 @@
/*
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H
#include "zstd_internal.h"
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when the workspace has been too large
* for at least this many consecutive uses,
* the context's memory usage is considered wasteful,
* because it is sized for a worst-case scenario that rarely occurs.
* In that case, resize it down to free some memory */
typedef enum {
ZSTD_cwksp_alloc_objects,
ZSTD_cwksp_alloc_buffers,
ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;
/**
* Zstd fits all its internal datastructures into a single contiguous buffer,
* so that it only needs to perform a single OS allocation (or so that a buffer
* can be provided to it and it can perform no allocations at all). This buffer
* is called the workspace.
*
* Several optimizations complicate that process of allocating memory ranges
* from this workspace for each datastructure:
*
* - These different internal datastructures have different setup requirements.
* Some (e.g., the window buffer) don't care, and are happy to accept
* uninitialized memory. Others (e.g., the matchstate tables) can accept
* memory filled with unknown but bounded values (i.e., a memory area whose
* values are known to be constrained between 0 and some upper bound). If
* that constraint isn't known to be satisfied, the area has to be cleared.
*
* - We would like to reuse the objects in the workspace for multiple
* compressions without having to perform any expensive reallocation or
* reinitialization work.
*
* - We would like to be able to efficiently reuse the workspace across
* multiple compressions **even when the compression parameters change** and
* we need to resize some of the objects (where possible).
*
* Workspace Layout:
*
* [ ... workspace ... ]
* [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
*
* In order to accomplish this, the various objects that live in the workspace
* are divided into the following categories:
*
* - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
* so that literally everything fits in a single buffer. Note: if present,
* this must be the first object in the workspace, since ZSTD_free{CCtx,
* CDict}() rely on a pointer comparison to see whether one or two frees are
* required.
*
* - Fixed size objects: these are fixed-size, fixed-count objects that are
* nonetheless "dynamically" allocated in the workspace so that we can
* control how they're initialized separately from the broader ZSTD_CCtx.
* Examples:
* - Entropy Workspace
* - 2 x ZSTD_compressedBlockState_t
* - CDict dictionary contents
*
* - Tables: these are any of several different datastructures (hash tables,
* chain tables, binary trees) that all respect a common format: they are
* uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
* Their sizes depend on the cparams.
*
* - Aligned: these buffers are used for various purposes that don't require
* any initialization before they're used.
*
* - Uninitialized memory: these buffers are used for various purposes that
* don't require any initialization before they're used. This means they can
* be moved around at no cost for a new compression.
*
* Allocating Memory:
*
* The various types of objects must be allocated in order, so they can be
* correctly packed into the workspace buffer. That order is:
*
* 1. Objects
* 2. Buffers
* 3. Aligned
* 4. Tables
*
* Reusing Table Space:
*
* TODO(felixh): ...
*/
typedef struct {
void* workspace;
void* workspaceEnd;
void* objectEnd;
void* tableEnd;
void* allocStart;
int allocFailed;
int workspaceOversizedDuration;
ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;
/**
* Align must be a power of 2.
*/
size_t ZSTD_cwksp_align(size_t size, size_t const align);
/**
* Internal function, use wrappers instead.
*/
void* ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase);
/**
* Unaligned.
*/
BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes);
/**
* Aligned on sizeof(unsigned).
*/
void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes);
/**
* Aligned on sizeof(unsigned). These buffers have the special property that
* their values remain constrained, allowing us to re-use them without
* memset()-ing them.
*/
void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes);
/**
* Aligned on sizeof(void*).
*/
void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes);
int ZSTD_cwksp_bump_oversized_duration(ZSTD_cwksp* ws);
/**
* Invalidates table allocations.
* All other allocations remain valid.
*/
void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws);
/**
* Invalidates all buffer, aligned, and table allocations.
* Object allocations remain valid.
*/
void ZSTD_cwksp_clear(ZSTD_cwksp* ws);
void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size);
size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem);
void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem);
size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t minFree);
int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t minFree);
size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws);
int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws);
#endif /* ZSTD_CWKSP_H */
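A quick contrast of the two clear operations declared above (a sketch, not part of the commit): ZSTD_cwksp_clear_tables() only invalidates table allocations, while ZSTD_cwksp_clear() also drops buffers and aligned allocations and rewinds the phase, leaving only object allocations valid. The workspace pointer is assumed to have been set up as in the earlier sketch, and the sizes are illustrative.

    #include "zstd_cwksp.h"

    static void demo_clears(ZSTD_cwksp* ws)
    {
        void* const obj = ZSTD_cwksp_reserve_object(ws, 4 * sizeof(void*));
        void* table     = ZSTD_cwksp_reserve_table(ws, 1024);

        /* Only the table span is invalidated; obj stays valid, and a
         * (possibly resized) table can be reserved again without a memset. */
        ZSTD_cwksp_clear_tables(ws);
        table = ZSTD_cwksp_reserve_table(ws, 2048);

        /* Buffers, aligned allocations, and tables are all invalidated and the
         * phase is rewound; only object allocations survive. */
        ZSTD_cwksp_clear(ws);

        (void)obj; (void)table;
    }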