Merge pull request #2326 from terrelln/kernel-test-fix

Fix issues and warnings exposed by Kernel Test Robot
Nick Terrell 2020-09-29 13:51:23 -07:00 committed by GitHub
commit d69d08ed6c
20 changed files with 653 additions and 276 deletions

View File

@ -21,6 +21,7 @@ from typing import Optional
INCLUDED_SUBDIRS = ["common", "compress", "decompress"]
SKIPPED_FILES = [
"common/mem.h",
"common/zstd_deps.h",
"common/pool.c",
"common/pool.h",
@ -77,7 +78,7 @@ class PartialPreprocessor(object):
fr"\s*#\s*{ELIF_GROUP}if\s+(?P<not>!)?\s*defined\s*\(\s*{MACRO_GROUP}\s*\)\s*{OP_GROUP}"
)
self._if_defined_value = re.compile(
-fr"\s*#\s*if\s+defined\s*\(\s*{MACRO_GROUP}\s*\)\s*"
+fr"\s*#\s*{ELIF_GROUP}if\s+defined\s*\(\s*{MACRO_GROUP}\s*\)\s*"
fr"(?P<op>&&)\s*"
fr"(?P<openp>\()?\s*"
fr"(?P<macro2>[a-zA-Z_][a-zA-Z_0-9]*)\s*"
@ -85,6 +86,9 @@ class PartialPreprocessor(object):
fr"(?P<value>[0-9]*)\s*"
fr"(?P<closep>\))?\s*"
)
self._if_true = re.compile(
fr"\s*#\s*{ELIF_GROUP}if\s+{MACRO_GROUP}\s*{OP_GROUP}"
)
self._c_comment = re.compile(r"/\*.*?\*/")
self._cpp_comment = re.compile(r"//")
@ -261,10 +265,14 @@ class PartialPreprocessor(object):
line = self._inlines[idx]
sline = self._strip_comments(line)
m = self._ifdef.fullmatch(sline)
if_true = False
if m is None:
m = self._if_defined_value.fullmatch(sline)
if m is None:
m = self._if_defined.match(sline)
if m is None:
m = self._if_true.match(sline)
if_true = (m is not None)
if m is None:
outlines.append(line)
idx += 1
@ -272,22 +280,54 @@ class PartialPreprocessor(object):
groups = m.groupdict()
macro = groups['macro']
-ifdef = groups.get('not') is None
-elseif = groups.get('elif') is not None
op = groups.get('op')
-macro2 = groups.get('macro2')
-cmp = groups.get('cmp')
-value = groups.get('value')
-openp = groups.get('openp')
-closep = groups.get('closep')
if not (macro in self._defs or macro in self._undefs):
outlines.append(line)
idx += 1
continue
defined = macro in self._defs
# Needed variables set:
# resolved: Is the statement fully resolved?
# is_true: If resolved, is the statement true?
ifdef = False
if if_true:
if not defined:
outlines.append(line)
idx += 1
continue
defined_value = self._defs[macro]
is_int = True
try:
defined_value = int(defined_value)
except TypeError:
is_int = False
except ValueError:
is_int = False
resolved = is_int
is_true = (defined_value != 0)
if resolved and op is not None:
if op == '&&':
resolved = not is_true
else:
assert op == '||'
resolved = is_true
else:
ifdef = groups.get('not') is None
elseif = groups.get('elif') is not None
macro2 = groups.get('macro2')
cmp = groups.get('cmp')
value = groups.get('value')
openp = groups.get('openp')
closep = groups.get('closep')
is_true = (ifdef == defined)
resolved = True
if op is not None:
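
For context, a minimal sketch of the kind of conditional the new _if_true branch is meant to resolve: a plain "#if MACRO" test, as opposed to "#if defined(MACRO)". The macro names are taken from elsewhere in this PR (the kernel Makefile below hardwires ZSTD_ADDRESS_SANITIZER and ZSTD_MEMORY_SANITIZER to 0); the exact output of freestanding.py is not shown here, this is only an illustrative input that also compiles as ordinary C.

/* Illustrative input only: when freestanding.py is run with
 * -DZSTD_ADDRESS_SANITIZER=0, the new _if_true branch can resolve the
 * plain "#if MACRO" test below and drop the dead branch from the
 * emitted file. */
#include <stdio.h>

#ifndef ZSTD_ADDRESS_SANITIZER
#define ZSTD_ADDRESS_SANITIZER 0 /* stands in for the hardwired definition */
#endif

int main(void)
{
#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
    puts("asan poisoning path");      /* stripped from the freestanding output */
#else
    puts("asan poisoning compiled out");
#endif
    return 0;
}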
@ -386,13 +426,14 @@ class PartialPreprocessor(object):
class Freestanding(object):
def __init__(
-self,zstd_deps: str, source_lib: str, output_lib: str,
+self, zstd_deps: str, mem: str, source_lib: str, output_lib: str,
external_xxhash: bool, xxh64_state: Optional[str],
xxh64_prefix: Optional[str], rewritten_includes: [(str, str)],
defs: [(str, Optional[str])], replaces: [(str, str)],
undefs: [str], excludes: [str]
):
self._zstd_deps = zstd_deps
self._mem = mem
self._src_lib = source_lib
self._dst_lib = output_lib
self._external_xxhash = external_xxhash
@ -453,6 +494,11 @@ class Freestanding(object):
self._log(f"Copying zstd_deps: {self._zstd_deps} -> {dst_zstd_deps}")
shutil.copyfile(self._zstd_deps, dst_zstd_deps)
def _copy_mem(self):
dst_mem = os.path.join(self._dst_lib, "common", "mem.h")
self._log(f"Copying mem: {self._mem} -> {dst_mem}")
shutil.copyfile(self._mem, dst_mem)
def _hardwire_preprocessor(self, name: str, value: Optional[str] = None, undef=False):
"""
If value=None then hardwire that it is defined, but not what the value is.
@ -553,6 +599,7 @@ class Freestanding(object):
def go(self):
self._copy_source_lib()
self._copy_zstd_deps()
self._copy_mem()
self._hardwire_defines()
self._remove_excludes()
self._rewrite_includes()
@ -587,6 +634,7 @@ def parse_pair(rewritten_includes: [str]) -> [(str, str)]:
def main(name, args):
parser = argparse.ArgumentParser(prog=name)
parser.add_argument("--zstd-deps", default="zstd_deps.h", help="Zstd dependencies file")
parser.add_argument("--mem", default="mem.h", help="Memory module")
parser.add_argument("--source-lib", default="../../lib", help="Location of the zstd library")
parser.add_argument("--output-lib", default="./freestanding_lib", help="Where to output the freestanding zstd library")
parser.add_argument("--xxhash", default=None, help="Alternate external xxhash include e.g. --xxhash='<xxhash.h>'. If set xxhash is not included.")
@ -630,6 +678,7 @@ def main(name, args):
Freestanding(
args.zstd_deps,
args.mem,
args.source_lib,
args.output_lib,
external_xxhash,

View File

@ -34,6 +34,9 @@ libzstd:
-DMEM_FORCE_MEMORY_ACCESS=0 \
-D__GNUC__ \
-DSTATIC_BMI2=0 \
-DZSTD_ADDRESS_SANITIZER=0 \
-DZSTD_MEMORY_SANITIZER=0 \
-DZSTD_COMPRESS_HEAPMODE=1 \
-UZSTD_NO_INLINE \
-UNO_PREFETCH \
-U__cplusplus \

contrib/linux-kernel/mem.h (new file, 258 lines)
View File

@ -0,0 +1,258 @@
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
/*-****************************************
* Dependencies
******************************************/
#include <asm/unaligned.h> /* get_unaligned, put_unaligned* */
#include <linux/compiler.h> /* inline */
#include <linux/swab.h> /* swab32, swab64 */
#include <linux/types.h> /* size_t, ptrdiff_t */
#include "debug.h" /* DEBUG_STATIC_ASSERT */
/*-****************************************
* Compiler specifics
******************************************/
#define MEM_STATIC static inline
/*-**************************************************************
* Basic Types
*****************************************************************/
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
/*-**************************************************************
* Memory I/O API
*****************************************************************/
/*=== Static platform detection ===*/
MEM_STATIC unsigned MEM_32bits(void);
MEM_STATIC unsigned MEM_64bits(void);
MEM_STATIC unsigned MEM_isLittleEndian(void);
/*=== Native unaligned read/write ===*/
MEM_STATIC U16 MEM_read16(const void* memPtr);
MEM_STATIC U32 MEM_read32(const void* memPtr);
MEM_STATIC U64 MEM_read64(const void* memPtr);
MEM_STATIC size_t MEM_readST(const void* memPtr);
MEM_STATIC void MEM_write16(void* memPtr, U16 value);
MEM_STATIC void MEM_write32(void* memPtr, U32 value);
MEM_STATIC void MEM_write64(void* memPtr, U64 value);
/*=== Little endian unaligned read/write ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr);
MEM_STATIC U32 MEM_readLE24(const void* memPtr);
MEM_STATIC U32 MEM_readLE32(const void* memPtr);
MEM_STATIC U64 MEM_readLE64(const void* memPtr);
MEM_STATIC size_t MEM_readLEST(const void* memPtr);
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
/*=== Big endian unaligned read/write ===*/
MEM_STATIC U32 MEM_readBE32(const void* memPtr);
MEM_STATIC U64 MEM_readBE64(const void* memPtr);
MEM_STATIC size_t MEM_readBEST(const void* memPtr);
MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
/*=== Byteswap ===*/
MEM_STATIC U32 MEM_swap32(U32 in);
MEM_STATIC U64 MEM_swap64(U64 in);
MEM_STATIC size_t MEM_swapST(size_t in);
/*-**************************************************************
* Memory I/O Implementation
*****************************************************************/
MEM_STATIC unsigned MEM_32bits(void)
{
return sizeof(size_t) == 4;
}
MEM_STATIC unsigned MEM_64bits(void)
{
return sizeof(size_t) == 8;
}
#if defined(__LITTLE_ENDIAN)
#define MEM_LITTLE_ENDIAN 1
#else
#define MEM_LITTLE_ENDIAN 0
#endif
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
return MEM_LITTLE_ENDIAN;
}
MEM_STATIC U16 MEM_read16(const void *memPtr)
{
return get_unaligned((const U16 *)memPtr);
}
MEM_STATIC U32 MEM_read32(const void *memPtr)
{
return get_unaligned((const U32 *)memPtr);
}
MEM_STATIC U64 MEM_read64(const void *memPtr)
{
return get_unaligned((const U64 *)memPtr);
}
MEM_STATIC size_t MEM_readST(const void *memPtr)
{
return get_unaligned((const size_t *)memPtr);
}
MEM_STATIC void MEM_write16(void *memPtr, U16 value)
{
put_unaligned(value, (U16 *)memPtr);
}
MEM_STATIC void MEM_write32(void *memPtr, U32 value)
{
put_unaligned(value, (U32 *)memPtr);
}
MEM_STATIC void MEM_write64(void *memPtr, U64 value)
{
put_unaligned(value, (U64 *)memPtr);
}
/*=== Little endian r/w ===*/
MEM_STATIC U16 MEM_readLE16(const void *memPtr)
{
return get_unaligned_le16(memPtr);
}
MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
{
put_unaligned_le16(val, memPtr);
}
MEM_STATIC U32 MEM_readLE24(const void *memPtr)
{
return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
}
MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
{
MEM_writeLE16(memPtr, (U16)val);
((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
}
MEM_STATIC U32 MEM_readLE32(const void *memPtr)
{
return get_unaligned_le32(memPtr);
}
MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
{
put_unaligned_le32(val32, memPtr);
}
MEM_STATIC U64 MEM_readLE64(const void *memPtr)
{
return get_unaligned_le64(memPtr);
}
MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
{
put_unaligned_le64(val64, memPtr);
}
MEM_STATIC size_t MEM_readLEST(const void *memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
{
if (MEM_32bits())
MEM_writeLE32(memPtr, (U32)val);
else
MEM_writeLE64(memPtr, (U64)val);
}
/*=== Big endian r/w ===*/
MEM_STATIC U32 MEM_readBE32(const void *memPtr)
{
return get_unaligned_be32(memPtr);
}
MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
{
put_unaligned_be32(val32, memPtr);
}
MEM_STATIC U64 MEM_readBE64(const void *memPtr)
{
return get_unaligned_be64(memPtr);
}
MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
{
put_unaligned_be64(val64, memPtr);
}
MEM_STATIC size_t MEM_readBEST(const void *memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readBE32(memPtr);
else
return (size_t)MEM_readBE64(memPtr);
}
MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
{
if (MEM_32bits())
MEM_writeBE32(memPtr, (U32)val);
else
MEM_writeBE64(memPtr, (U64)val);
}
MEM_STATIC U32 MEM_swap32(U32 in)
{
return swab32(in);
}
MEM_STATIC U64 MEM_swap64(U64 in)
{
return swab64(in);
}
MEM_STATIC size_t MEM_swapST(size_t in)
{
if (MEM_32bits())
return (size_t)MEM_swap32((U32)in);
else
return (size_t)MEM_swap64((U64)in);
}
#endif /* MEM_H_MODULE */
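
A hedged usage sketch of the helpers this new header provides. It cannot include the kernel-only <asm/unaligned.h>, so the two LE16 primitives below are portable stand-ins for get_unaligned_le16/put_unaligned_le16; the LE24 composition mirrors MEM_readLE24/MEM_writeLE24 above.

#include <assert.h>
#include <stdint.h>

/* Portable stand-ins for the kernel's unaligned LE16 accessors. */
static uint16_t readLE16(const void *p)
{
    const uint8_t *b = (const uint8_t *)p;
    return (uint16_t)(b[0] | (b[1] << 8));
}
static void writeLE16(void *p, uint16_t v)
{
    uint8_t *b = (uint8_t *)p;
    b[0] = (uint8_t)v;
    b[1] = (uint8_t)(v >> 8);
}

/* Same composition as MEM_readLE24/MEM_writeLE24: low 16 bits via the LE16
 * helper, byte 2 carries bits 16..23. */
static uint32_t readLE24(const void *p)
{
    return readLE16(p) + (((const uint8_t *)p)[2] << 16);
}
static void writeLE24(void *p, uint32_t v)
{
    writeLE16(p, (uint16_t)v);
    ((uint8_t *)p)[2] = (uint8_t)(v >> 16);
}

int main(void)
{
    unsigned char buf[3];
    writeLE24(buf, 0xABCDEF);
    assert(buf[0] == 0xEF && buf[1] == 0xCD && buf[2] == 0xAB);
    assert(readLE24(buf) == 0xABCDEF);
    return 0;
}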

View File

@ -2,7 +2,6 @@
#define ASM_UNALIGNED_H
#include <assert.h>
-#include <linux/string.h>
#include <linux/types.h>
#define _LITTLE_ENDIAN 1
@ -33,7 +32,7 @@ static uint64_t _swap64(uint64_t in)
static uint16_t get_unaligned_le16(const void* memPtr)
{
uint16_t val;
-memcpy(&val, memPtr, sizeof(val));
+__builtin_memcpy(&val, memPtr, sizeof(val));
if (!_isLittleEndian()) _swap16(val);
return val;
}
@ -41,7 +40,7 @@ static uint16_t get_unaligned_le16(const void* memPtr)
static uint32_t get_unaligned_le32(const void* memPtr)
{
uint32_t val;
-memcpy(&val, memPtr, sizeof(val));
+__builtin_memcpy(&val, memPtr, sizeof(val));
if (!_isLittleEndian()) _swap32(val);
return val;
}
@ -49,7 +48,7 @@ static uint32_t get_unaligned_le32(const void* memPtr)
static uint64_t get_unaligned_le64(const void* memPtr)
{
uint64_t val;
-memcpy(&val, memPtr, sizeof(val));
+__builtin_memcpy(&val, memPtr, sizeof(val));
if (!_isLittleEndian()) _swap64(val);
return val;
}
@ -57,26 +56,26 @@ static uint64_t get_unaligned_le64(const void* memPtr)
static void put_unaligned_le16(uint16_t value, void* memPtr)
{
if (!_isLittleEndian()) value = _swap16(value);
-memcpy(memPtr, &value, sizeof(value));
+__builtin_memcpy(memPtr, &value, sizeof(value));
}
static void put_unaligned_le32(uint32_t value, void* memPtr)
{
if (!_isLittleEndian()) value = _swap32(value);
-memcpy(memPtr, &value, sizeof(value));
+__builtin_memcpy(memPtr, &value, sizeof(value));
}
static void put_unaligned_le64(uint64_t value, void* memPtr)
{
if (!_isLittleEndian()) value = _swap64(value);
-memcpy(memPtr, &value, sizeof(value));
+__builtin_memcpy(memPtr, &value, sizeof(value));
}
/* big endian */
static uint32_t get_unaligned_be32(const void* memPtr)
{
uint32_t val;
-memcpy(&val, memPtr, sizeof(val));
+__builtin_memcpy(&val, memPtr, sizeof(val));
if (_isLittleEndian()) _swap32(val);
return val;
}
@ -84,7 +83,7 @@ static uint32_t get_unaligned_be32(const void* memPtr)
static uint64_t get_unaligned_be64(const void* memPtr)
{
uint64_t val;
-memcpy(&val, memPtr, sizeof(val));
+__builtin_memcpy(&val, memPtr, sizeof(val));
if (_isLittleEndian()) _swap64(val);
return val;
}
@ -92,13 +91,13 @@ static uint64_t get_unaligned_be64(const void* memPtr)
static void put_unaligned_be32(uint32_t value, void* memPtr)
{
if (_isLittleEndian()) value = _swap32(value);
-memcpy(memPtr, &value, sizeof(value));
+__builtin_memcpy(memPtr, &value, sizeof(value));
}
static void put_unaligned_be64(uint64_t value, void* memPtr)
{
if (_isLittleEndian()) value = _swap64(value);
-memcpy(memPtr, &value, sizeof(value));
+__builtin_memcpy(memPtr, &value, sizeof(value));
}
/* generic */
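
The switch to __builtin_memcpy above is what lets this userspace test shim drop its <linux/string.h> include. A small hedged sketch of the same idiom: the builtin (provided by GCC and Clang) performs a well-defined unaligned load without requiring any libc declaration.

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper mirroring the shim's pattern. */
static uint32_t load32(const void *p)
{
    uint32_t v;
    __builtin_memcpy(&v, p, sizeof(v));
    return v;
}

int main(void)
{
    unsigned char buf[8] = {0};
    uint32_t in = 0xDEADBEEFu;
    __builtin_memcpy(buf + 1, &in, sizeof(in)); /* deliberately misaligned */
    assert(load32(buf + 1) == in);
    return 0;
}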

View File

@ -7,9 +7,11 @@
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
-#ifndef LINUX_STRING_H
+#ifndef LINUX_COMPILER_H
-#define LINUX_STRING_H
+#define LINUX_COMPILER_H
-#include <string.h>
+#ifndef inline
#define inline __inline __attribute__((unused))
#endif
#endif

View File

@ -0,0 +1,16 @@
/*
* Copyright (c) 2016-2020, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef LINUX_SWAB_H
#define LINUX_SWAB_H
#define swab32(x) __builtin_bswap32((x))
#define swab64(x) __builtin_bswap64((x))
#endif
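
A quick check of what the two macros above do, runnable in userspace with GCC or Clang (the builtins they expand to are compiler-provided, not libc functions).

#include <assert.h>
#include <stdint.h>

#define swab32(x) __builtin_bswap32((x))
#define swab64(x) __builtin_bswap64((x))

int main(void)
{
    assert(swab32(0x11223344u) == 0x44332211u);
    assert(swab64(0x1122334455667788ull) == 0x8877665544332211ull);
    return 0;
}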

View File

@ -301,7 +301,6 @@ XXH_API void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/string.h>
#include <linux/xxhash.h>
/*-*************************************
@ -336,12 +335,12 @@ static const uint64_t PRIME64_5 = 2870177450012600261ULL;
***************************/
XXH_API void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
{
-memcpy(dst, src, sizeof(*dst));
+__builtin_memcpy(dst, src, sizeof(*dst));
}
XXH_API void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
{
-memcpy(dst, src, sizeof(*dst));
+__builtin_memcpy(dst, src, sizeof(*dst));
}
/*-***************************
@ -498,12 +497,12 @@ XXH_API void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
/* use a local state for memcpy() to avoid strict-aliasing warnings */
struct xxh32_state state;
-memset(&state, 0, sizeof(state));
+__builtin_memset(&state, 0, sizeof(state));
state.v1 = seed + PRIME32_1 + PRIME32_2;
state.v2 = seed + PRIME32_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME32_1;
-memcpy(statePtr, &state, sizeof(state));
+__builtin_memcpy(statePtr, &state, sizeof(state));
}
XXH_API void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
@ -511,12 +510,12 @@ XXH_API void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
/* use a local state for memcpy() to avoid strict-aliasing warnings */
struct xxh64_state state;
-memset(&state, 0, sizeof(state));
+__builtin_memset(&state, 0, sizeof(state));
state.v1 = seed + PRIME64_1 + PRIME64_2;
state.v2 = seed + PRIME64_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME64_1;
-memcpy(statePtr, &state, sizeof(state));
+__builtin_memcpy(statePtr, &state, sizeof(state));
}
XXH_API int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
@ -531,7 +530,7 @@ XXH_API int xxh32_update(struct xxh32_state *state, const void *input, const siz
state->large_len |= (len >= 16) | (state->total_len_32 >= 16);
if (state->memsize + len < 16) { /* fill in tmp buffer */
-memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
+__builtin_memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
state->memsize += (uint32_t)len;
return 0;
}
@ -539,7 +538,7 @@ XXH_API int xxh32_update(struct xxh32_state *state, const void *input, const siz
if (state->memsize) { /* some data left from previous update */
const uint32_t *p32 = state->mem32;
-memcpy((uint8_t *)(state->mem32) + state->memsize, input,
+__builtin_memcpy((uint8_t *)(state->mem32) + state->memsize, input,
16 - state->memsize);
state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32));
@ -580,7 +579,7 @@ XXH_API int xxh32_update(struct xxh32_state *state, const void *input, const siz
}
if (p < b_end) {
-memcpy(state->mem32, p, (size_t)(b_end-p));
+__builtin_memcpy(state->mem32, p, (size_t)(b_end-p));
state->memsize = (uint32_t)(b_end-p);
}
@ -635,7 +634,7 @@ XXH_API int xxh64_update(struct xxh64_state *state, const void *input, const siz
state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
-memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
+__builtin_memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
state->memsize += (uint32_t)len;
return 0;
}
@ -643,7 +642,7 @@ XXH_API int xxh64_update(struct xxh64_state *state, const void *input, const siz
if (state->memsize) { /* tmp buffer is full */
uint64_t *p64 = state->mem64;
-memcpy(((uint8_t *)p64) + state->memsize, input,
+__builtin_memcpy(((uint8_t *)p64) + state->memsize, input,
32 - state->memsize);
state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
@ -683,7 +682,7 @@ XXH_API int xxh64_update(struct xxh64_state *state, const void *input, const siz
}
if (p < b_end) {
-memcpy(state->mem64, p, (size_t)(b_end-p));
+__builtin_memcpy(state->mem64, p, (size_t)(b_end-p));
state->memsize = (uint32_t)(b_end - p);
}

View File

@ -8,36 +8,26 @@
* You may select, at your option, one of the above-listed licenses.
*/
/*
* This file provides common libc dependencies that zstd requires.
* The purpose is to allow replacing this file with a custom implementation
* to compile zstd without libc support.
*/
/* Need:
* NULL
* INT_MAX
* UINT_MAX
* ZSTD_memcpy()
* ZSTD_memset()
* ZSTD_memmove()
-* BYTE
-* S16
-* U16
-* U32
-* U64
-* size_t
-* ptrdiff_t
-* INT_MAX
-* UINT_MAX
*/
#ifndef ZSTD_DEPS_COMMON
#define ZSTD_DEPS_COMMON
#include <linux/limits.h>
-#include <linux/types.h>
#include <linux/stddef.h>
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef int16_t S16;
-typedef uint32_t U32;
-typedef int32_t S32;
-typedef uint64_t U64;
-typedef int64_t S64;
#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
@ -74,7 +64,7 @@ typedef int64_t S64;
#include <linux/math64.h>
-static U64 ZSTD_div64(U64 dividend, U32 divisor) {
+static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
return div_u64(dividend, divisor);
}
@ -128,7 +118,7 @@ static U64 ZSTD_div64(U64 dividend, U32 divisor) {
* The Linux Kernel doesn't provide intptr_t, only uintptr_t, which
* is an unsigned long.
*/
-typedef long intptr_t
+typedef long intptr_t;
#endif /* ZSTD_DEPS_STDINT */
#endif /* ZSTD_DEPS_NEED_STDINT */
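
The ZSTD_div64 wrapper above now spells its prototype with uint64_t/uint32_t because the U64/U32 typedefs moved out of zstd_deps.h in this PR; in the kernel it forwards to div_u64() so 32-bit builds do not need libgcc's 64-bit division helpers. A hedged userspace sketch of the same shape, with plain division standing in for div_u64:

#include <assert.h>
#include <stdint.h>

/* Sketch only: the kernel version calls div_u64(dividend, divisor) here. */
static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor)
{
    return dividend / divisor;
}

int main(void)
{
    assert(ZSTD_div64(1000000000000ull, 1000u) == 1000000000ull);
    return 0;
}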

View File

@ -203,7 +203,7 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
size_t value, unsigned nbBits)
{
-MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
assert(nbBits < BIT_MASK_SIZE);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;

View File

@ -196,4 +196,93 @@
#define STATIC_BMI2 0
#endif
/* compat. with non-clang compilers */
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif
/* compat. with non-clang compilers */
#ifndef __has_feature
# define __has_feature(x) 0
#endif
/* detects whether we are being compiled under msan */
#ifndef ZSTD_MEMORY_SANITIZER
# if __has_feature(memory_sanitizer)
# define ZSTD_MEMORY_SANITIZER 1
# else
# define ZSTD_MEMORY_SANITIZER 0
# endif
#endif
#if ZSTD_MEMORY_SANITIZER
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
* We therefore declare the functions we need ourselves, rather than trying to
* include the header file... */
#include <stddef.h> /* size_t */
#define ZSTD_DEPS_NEED_STDINT
#include "zstd_deps.h" /* intptr_t */
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
/* Make memory region fully uninitialized (without changing its contents).
This is a legacy interface that does not update origin information. Use
__msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);
/* Returns the offset of the first (at least partially) poisoned byte in the
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif
/* detects whether we are being compiled under asan */
#ifndef ZSTD_ADDRESS_SANITIZER
# if __has_feature(address_sanitizer)
# define ZSTD_ADDRESS_SANITIZER 1
# elif defined(__SANITIZE_ADDRESS__)
# define ZSTD_ADDRESS_SANITIZER 1
# else
# define ZSTD_ADDRESS_SANITIZER 0
# endif
#endif
#if ZSTD_ADDRESS_SANITIZER
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
* We therefore declare the functions we need ourselves, rather than trying to
* include the header file... */
#include <stddef.h> /* size_t */
/**
* Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
*
* This memory must be previously allocated by your program. Instrumented
* code is forbidden from accessing addresses in this region until it is
* unpoisoned. This function is not guaranteed to poison the entire region -
* it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
* alignment restrictions.
*
* \note This function is not thread-safe because no two threads can poison or
* unpoison memory in the same memory region simultaneously.
*
* \param addr Start of memory region.
* \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);
/**
* Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
*
* This memory must be previously allocated by your program. Accessing
* addresses in this region is allowed until this region is poisoned again.
* This function could unpoison a super-region of <c>[addr, addr+size)</c> due
* to ASan alignment restrictions.
*
* \note This function is not thread-safe because no two threads can
* poison or unpoison memory in the same memory region simultaneously.
*
* \param addr Start of memory region.
* \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
#endif /* ZSTD_COMPILER_H */
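
Because ZSTD_MEMORY_SANITIZER and ZSTD_ADDRESS_SANITIZER are now always defined to 0 or 1, callers can test them with a plain #if, which is exactly the pattern the zstd_cwksp.h and zstd_compress.c hunks below switch to. A hedged sketch of how such a gate is typically consumed; markRegionUnused() is a hypothetical caller, not a zstd function, and the detection below is simplified.

#include <stddef.h>

#ifndef ZSTD_ADDRESS_SANITIZER
# if defined(__SANITIZE_ADDRESS__)
#  define ZSTD_ADDRESS_SANITIZER 1
# else
#  define ZSTD_ADDRESS_SANITIZER 0
# endif
#endif

#if ZSTD_ADDRESS_SANITIZER
/* Declared by hand, as in the header above. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);
#endif

static void markRegionUnused(void *ptr, size_t size)
{
#if ZSTD_ADDRESS_SANITIZER
    __asan_poison_memory_region(ptr, size); /* instrumented builds only */
#else
    (void)ptr; (void)size;                  /* plain builds: no-op */
#endif
}

int main(void)
{
    static char scratch[64];
    markRegionUnused(scratch, sizeof(scratch));
    return 0;
}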

View File

@ -18,7 +18,10 @@ extern "C" {
/*-****************************************
* Dependencies
******************************************/
-#include "zstd_deps.h" /* size_t, ptrdiff_t, ZSTD_memcpy */
+#include <stddef.h> /* size_t, ptrdiff_t */
#include "compiler.h" /* __has_builtin */
#include "debug.h" /* DEBUG_STATIC_ASSERT */
#include "zstd_deps.h" /* ZSTD_memcpy */
/*-****************************************
@ -38,91 +41,89 @@ extern "C" {
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
-#ifndef __has_builtin
-# define __has_builtin(x) 0 /* compat. with non-clang compilers */
-#endif
-/* code only tested on 32 and 64 bits systems */
-#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
-MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
-/* detects whether we are being compiled under msan */
-#if defined (__has_feature)
-# if __has_feature(memory_sanitizer)
-# define MEMORY_SANITIZER 1
-# endif
-#endif
-#if defined (MEMORY_SANITIZER)
-/* Not all platforms that support msan provide sanitizers/msan_interface.h.
-* We therefore declare the functions we need ourselves, rather than trying to
-* include the header file... */
-#define ZS_DEPS_NEED_STDINT
-#include "zstd_deps.h"
-/* Make memory region fully initialized (without changing its contents). */
-void __msan_unpoison(const volatile void *a, size_t size);
-/* Make memory region fully uninitialized (without changing its contents).
-This is a legacy interface that does not update origin information. Use
-__msan_allocated_memory() instead. */
-void __msan_poison(const volatile void *a, size_t size);
-/* Returns the offset of the first (at least partially) poisoned byte in the
-memory range, or -1 if the whole range is good. */
-intptr_t __msan_test_shadow(const volatile void *x, size_t size);
-#endif
-/* detects whether we are being compiled under asan */
-#if defined (__has_feature)
-# if __has_feature(address_sanitizer)
-# define ADDRESS_SANITIZER 1
-# endif
-#elif defined(__SANITIZE_ADDRESS__)
-# define ADDRESS_SANITIZER 1
-#endif
-#if defined (ADDRESS_SANITIZER)
-/* Not all platforms that support asan provide sanitizers/asan_interface.h.
-* We therefore declare the functions we need ourselves, rather than trying to
-* include the header file... */
-/**
-* Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
-*
-* This memory must be previously allocated by your program. Instrumented
-* code is forbidden from accessing addresses in this region until it is
-* unpoisoned. This function is not guaranteed to poison the entire region -
-* it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
-* alignment restrictions.
-*
-* \note This function is not thread-safe because no two threads can poison or
-* unpoison memory in the same memory region simultaneously.
-*
-* \param addr Start of memory region.
-* \param size Size of memory region. */
-void __asan_poison_memory_region(void const volatile *addr, size_t size);
-/**
-* Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
-*
-* This memory must be previously allocated by your program. Accessing
-* addresses in this region is allowed until this region is poisoned again.
-* This function could unpoison a super-region of <c>[addr, addr+size)</c> due
-* to ASan alignment restrictions.
-*
-* \note This function is not thread-safe because no two threads can
-* poison or unpoison memory in the same memory region simultaneously.
-*
-* \param addr Start of memory region.
-* \param size Size of memory region. */
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
-#endif
-/*-**************************************************************
-* Memory I/O
-*****************************************************************/

/*-**************************************************************
* Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
# error "this implementation requires char to be exactly 8-bit type"
#endif
typedef unsigned char BYTE;
#if USHRT_MAX != 65535
# error "this implementation requires short to be exactly 16-bit type"
#endif
typedef unsigned short U16;
typedef signed short S16;
#if UINT_MAX != 4294967295
# error "this implementation requires int to be exactly 32-bit type"
#endif
typedef unsigned int U32;
typedef signed int S32;
/* note : there are no limits defined for long long type in C90.
* limits exist in C99, however, in such case, <stdint.h> is preferred */
typedef unsigned long long U64;
typedef signed long long S64;
#endif

/*-**************************************************************
* Memory I/O API
*****************************************************************/
/*=== Static platform detection ===*/
MEM_STATIC unsigned MEM_32bits(void);
MEM_STATIC unsigned MEM_64bits(void);
MEM_STATIC unsigned MEM_isLittleEndian(void);
/*=== Native unaligned read/write ===*/
MEM_STATIC U16 MEM_read16(const void* memPtr);
MEM_STATIC U32 MEM_read32(const void* memPtr);
MEM_STATIC U64 MEM_read64(const void* memPtr);
MEM_STATIC size_t MEM_readST(const void* memPtr);
MEM_STATIC void MEM_write16(void* memPtr, U16 value);
MEM_STATIC void MEM_write32(void* memPtr, U32 value);
MEM_STATIC void MEM_write64(void* memPtr, U64 value);
/*=== Little endian unaligned read/write ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr);
MEM_STATIC U32 MEM_readLE24(const void* memPtr);
MEM_STATIC U32 MEM_readLE32(const void* memPtr);
MEM_STATIC U64 MEM_readLE64(const void* memPtr);
MEM_STATIC size_t MEM_readLEST(const void* memPtr);
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
/*=== Big endian unaligned read/write ===*/
MEM_STATIC U32 MEM_readBE32(const void* memPtr);
MEM_STATIC U64 MEM_readBE64(const void* memPtr);
MEM_STATIC size_t MEM_readBEST(const void* memPtr);
MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
/*=== Byteswap ===*/
MEM_STATIC U32 MEM_swap32(U32 in);
MEM_STATIC U64 MEM_swap64(U64 in);
MEM_STATIC size_t MEM_swapST(size_t in);
/*-**************************************************************
* Memory I/O Implementation
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
@ -410,6 +411,9 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
MEM_writeBE64(memPtr, (U64)val);
}
/* code only tested on 32 and 64 bits systems */
MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
#if defined (__cplusplus)
}

View File

@ -93,32 +93,13 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_
/* *************************************
* Compiler Specific Options
***************************************/
-#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
-# define INLINE_KEYWORD inline
-#else
-# define INLINE_KEYWORD
-#endif
-#if defined(__GNUC__) || defined(__ICCARM__)
-# define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-# define FORCE_INLINE_ATTR __forceinline
-#else
-# define FORCE_INLINE_ATTR
-#endif
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-#ifdef _MSC_VER
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif
+#include "compiler.h"
/* *************************************
* Basic Types
***************************************/
#include "mem.h" /* BYTE, U32, U64, size_t */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

View File

@ -8,19 +8,18 @@
* You may select, at your option, one of the above-listed licenses.
*/
/* This file provides common libc dependencies that zstd requires.
* The purpose is to allow replacing this file with a custom implementation
* to compile zstd without libc support.
*/
/* Need:
* NULL
* INT_MAX
* UINT_MAX
* ZSTD_memcpy()
* ZSTD_memset()
* ZSTD_memmove()
-* BYTE
-* S16
-* U16
-* U32
-* U64
-* size_t
-* ptrdiff_t
-* INT_MAX
-* ...
*/
#ifndef ZSTD_DEPS_COMMON
#define ZSTD_DEPS_COMMON
@ -39,40 +38,6 @@
# define ZSTD_memset(p,v,l) memset((p),(v),(l))
#endif
-/*-**************************************************************
-* Basic Types
-*****************************************************************/
-#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef int16_t S16;
-typedef uint32_t U32;
-typedef int32_t S32;
-typedef uint64_t U64;
-typedef int64_t S64;
-#else
-# include <limits.h>
-#if CHAR_BIT != 8
-# error "this implementation requires char to be exactly 8-bit type"
-#endif
-typedef unsigned char BYTE;
-#if USHRT_MAX != 65535
-# error "this implementation requires short to be exactly 16-bit type"
-#endif
-typedef unsigned short U16;
-typedef signed short S16;
-#if UINT_MAX != 4294967295
-# error "this implementation requires int to be exactly 32-bit type"
-#endif
-typedef unsigned int U32;
-typedef signed int S32;
-/* note : there are no limits defined for long long type in C90.
-* limits exist in C99, however, in such case, <stdint.h> is preferred */
-typedef unsigned long long U64;
-typedef signed long long S64;
-#endif
#endif /* ZSTD_DEPS_COMMON */
/* Need:
@ -102,9 +67,7 @@
#ifndef ZSTD_DEPS_MATH64
#define ZSTD_DEPS_MATH64
-static U64 ZSTD_div64(U64 dividend, U32 divisor) {
-return dividend / divisor;
-}
+#define ZSTD_div64(dividend, divisor) ((dividend) / (divisor))
#endif /* ZSTD_DEPS_MATH64 */
#endif /* ZSTD_DEPS_NEED_MATH64 */

View File

@ -186,6 +186,10 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
#define OffFSELog 8
#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
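
A rough sanity check of the new bound, runnable on its own. The numeric constants are the values defined earlier in zstd_internal.h at the time of this PR (MaxML=52, MaxLL=35, MaxOff=31, MLFSELog=9, LLFSELog=9, OffFSELog=8); treat them as an assumption here, the formula is what the diff adds.

#include <assert.h>

#define MaxML 52
#define MaxLL 35
#define MaxOff 31
#define MLFSELog 9
#define LLFSELog 9
#define OffFSELog 8
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)

int main(void)
{
    /* (53*9 + 36*9 + 32*8 + 7) / 8 = (477 + 324 + 256 + 7) / 8 = 133 bytes,
     * well under the hard-coded 500-byte buffers it replaces below. */
    assert(ZSTD_MAX_FSE_HEADERS_SIZE == 133);
    return 0;
}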

View File

@ -29,6 +29,19 @@
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* COMPRESS_HEAPMODE :
* Select how default decompression function ZSTD_compress() allocates its context,
* on stack (0, default), or into heap (1).
* Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
*/
#ifndef ZSTD_COMPRESS_HEAPMODE
# define ZSTD_COMPRESS_HEAPMODE 0
#endif
/*-*************************************
* Helper functions
@ -81,7 +94,7 @@ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
-if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
if (!cctx) return NULL;
ZSTD_initCCtx(cctx, customMem);
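
The extra parentheses only silence a compiler-precedence warning; the check itself still rejects a custom allocator where exactly one of customAlloc/customFree is provided. A small standalone illustration of that both-or-neither rule, using hypothetical stand-in types rather than the zstd API:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct {
    void *(*customAlloc)(void *opaque, size_t size);
    void (*customFree)(void *opaque, void *address);
    void *opaque;
} custom_mem;

static int custom_mem_is_valid(custom_mem m)
{
    /* same test as the diff: exactly one pointer set -> reject */
    return !((!m.customAlloc) ^ (!m.customFree));
}

static void *my_alloc(void *opaque, size_t size) { (void)opaque; return malloc(size); }
static void my_free(void *opaque, void *address) { (void)opaque; free(address); }

int main(void)
{
    custom_mem none = { NULL, NULL, NULL };
    custom_mem both = { my_alloc, my_free, NULL };
    custom_mem half = { my_alloc, NULL, NULL };
    assert(custom_mem_is_valid(none));
    assert(custom_mem_is_valid(both));
    assert(!custom_mem_is_valid(half));
    return 0;
}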
@ -203,7 +216,7 @@ static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
ZSTD_customMem customMem)
{
ZSTD_CCtx_params* params;
-if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
sizeof(ZSTD_CCtx_params), customMem);
if (!params) { return NULL; }
@ -1902,7 +1915,7 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
assert(size < (1U<<31)); /* can be casted to int */
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty.
@ -3370,10 +3383,17 @@ size_t ZSTD_compress(void* dst, size_t dstCapacity,
int compressionLevel)
{
size_t result;
#if ZSTD_COMPRESS_HEAPMODE
ZSTD_CCtx* cctx = ZSTD_createCCtx();
RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
ZSTD_freeCCtx(cctx);;
#else
ZSTD_CCtx ctxBody;
ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */
#endif
return result;
}
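
Nothing changes for callers of ZSTD_compress(); ZSTD_COMPRESS_HEAPMODE only decides whether the large ZSTD_CCtx lives on the stack or the heap, and the kernel Makefile in this PR builds with -DZSTD_COMPRESS_HEAPMODE=1, presumably to keep that context off the limited kernel stack. A minimal userspace sketch of the one-shot API (link against libzstd):

#include <stdio.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "a few bytes of sample data, sample data, sample data";
    char dst[256];
    size_t csize;

    if (ZSTD_compressBound(sizeof(src)) > sizeof(dst)) return 1;
    csize = ZSTD_compress(dst, sizeof(dst), src, sizeof(src), 3);
    if (ZSTD_isError(csize)) {
        fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(csize));
        return 1;
    }
    printf("%zu -> %zu bytes\n", sizeof(src), csize);
    return 0;
}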
@ -3467,7 +3487,7 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
-if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
@ -3503,7 +3523,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
ZSTD_customMem customMem)
{
ZSTD_CCtx_params cctxParams;
-memset(&cctxParams, 0, sizeof(cctxParams));
+ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
ZSTD_CCtxParams_init(&cctxParams, 0);
cctxParams.cParams = cParams;
cctxParams.customMem = customMem;

View File

@ -29,7 +29,7 @@
* This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
typedef struct {
symbolEncodingType_e hType;
-BYTE hufDesBuffer[500]; /* TODO give name to this value */
+BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;
@ -42,7 +42,7 @@ typedef struct {
symbolEncodingType_e llType;
symbolEncodingType_e ofType;
symbolEncodingType_e mlType;
-BYTE fseTablesBuffer[500]; /* TODO give name to this value */
+BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
size_t fseTablesSize;
size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
} ZSTD_fseCTablesMetadata_t;

View File

@ -178,7 +178,7 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
* else is though.
*/
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
return size;
@ -228,7 +228,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
ZSTD_cwksp_internal_advance_phase(ws, phase);
alloc = (BYTE *)ws->allocStart - bytes;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* over-reserve space */
alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif
@ -247,7 +247,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
}
ws->allocStart = alloc;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
* either size. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
@ -296,7 +296,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
}
ws->tableEnd = end;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
__asan_unpoison_memory_region(alloc, bytes);
#endif
@ -311,7 +311,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
void* alloc = ws->objectEnd;
void* end = (BYTE*)alloc + roundedBytes;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* over-reserve space */
end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif
@ -332,7 +332,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
ws->tableEnd = end;
ws->tableValidEnd = end;
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
* either size. */
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
@ -345,7 +345,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
* access table space that we haven't cleaned, we re-"poison" the table
* space every time we mark it dirty. */
@ -392,7 +392,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing tables!");
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
{
size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);
@ -410,7 +410,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
DEBUGLOG(4, "cwksp: clearing!");
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the context re-use logic is sound, and that we don't
* access stuff that this compression hasn't initialized, we re-"poison"
* the workspace (or at least the non-static, non-table parts of it)
@ -421,7 +421,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
}
#endif
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
{
size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
__asan_poison_memory_region(ws->objectEnd, size);

View File

@ -147,7 +147,7 @@ ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_customMem customMem)
{
-if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
if (ddict == NULL) return NULL;

View File

@ -136,7 +136,7 @@ ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
{
-if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
if (!dctx) return NULL;

View File

@ -457,7 +457,7 @@ static void ZWRAP_initDCtx(ZWRAP_DCtx* zwd)
static ZWRAP_DCtx* ZWRAP_createDCtx(z_streamp strm)
{
ZWRAP_DCtx* zwd;
-MEM_STATIC_ASSERT(sizeof(zwd->headerBuf) >= ZSTD_HEADERSIZE); /* check static buffer size condition */
+DEBUG_STATIC_ASSERT(sizeof(zwd->headerBuf) >= ZSTD_HEADERSIZE); /* check static buffer size condition */
if (strm->zalloc && strm->zfree) {
zwd = (ZWRAP_DCtx*)strm->zalloc(strm->opaque, 1, sizeof(ZWRAP_DCtx));
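
MEM_STATIC_ASSERT, removed from mem.h earlier in this diff, is replaced here by DEBUG_STATIC_ASSERT from debug.h. Both are compile-time checks; a hedged sketch of the enum-division idiom the old macro used (the stand-in name is hypothetical, and debug.h's exact definition may differ):

/* Fails to compile, via a division by zero in a constant expression, when the
 * condition is false; has no effect at runtime when it is true. */
#define MY_STATIC_ASSERT(c) { enum { my_static_assert = 1/(int)(!!(c)) }; }

int main(void)
{
    MY_STATIC_ASSERT(sizeof(int) >= 2);       /* fine */
    /* MY_STATIC_ASSERT(sizeof(int) == 1); */ /* would break the build */
    return 0;
}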