Merge remote-tracking branch 'upstream/dev' into dev

Paul Cruz 2017-07-31 11:43:40 -07:00
commit cf92221671
48 changed files with 2768 additions and 883 deletions

NEWS

@ -1,6 +1,9 @@
v1.3.1
perf: substantially decreased memory usage in Multi-threading mode, thanks to reports by Tino Reichardt
perf: Multi-threading supports up to 256 threads. Cap at 256 when more are requested (#760)
build: fix Visual compilation for non x86/x64 targets, reported by Greg Slazinski (#718)
API exp : breaking change : ZSTD_getframeHeader()
API exp : breaking change : ZSTD_getframeHeader() provides more information
API exp : breaking change : pinned down values of error codes
v1.3.0
cli : new : `--list` command, by Paul Cruz


@ -31,6 +31,9 @@ ENDIF (MSVC)
ADD_EXECUTABLE(zstd ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/bench.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PlatformDependResources})
TARGET_LINK_LIBRARIES(zstd libzstd_static)
IF (CMAKE_SYSTEM_NAME MATCHES "(Solaris|SunOS)")
TARGET_LINK_LIBRARIES(zstd rt)
ENDIF (CMAKE_SYSTEM_NAME MATCHES "(Solaris|SunOS)")
INSTALL(TARGETS zstd RUNTIME DESTINATION "bin")
IF (UNIX)


@ -1,7 +1,7 @@
From 8bc9a0ae5c86a6d02d9a5274b9965ddac0e8d330 Mon Sep 17 00:00:00 2001
From 0cd63464d182bb9708f8b25f7da3dc8e5ec6b4fa Mon Sep 17 00:00:00 2001
From: Nick Terrell <terrelln@fb.com>
Date: Wed, 28 Jun 2017 22:00:00 -0700
Subject: [PATCH v2 0/4] Add xxhash and zstd modules
Date: Thu, 20 Jul 2017 13:18:30 -0700
Subject: [PATCH v3 0/4] Add xxhash and zstd modules
Hi all,
@ -24,6 +24,13 @@ v1 -> v2:
HUF_compressWeights(), HUF_readDTableX2(), and HUF_readDTableX4() (2/4)
- No zstd function uses more than 400 B of stack space (2/4)
v2 -> v3:
- Work around gcc-7 bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388
(2/4)
- Fix bug in dictionary compression from upstream commit cc1522351f (2/4)
- Port upstream BtrFS commits e1ddce71d6, 389a6cfc2a, and 6acafd1eff (3/4)
- Change default compression level for BtrFS to 3 (3/4)
Nick Terrell (4):
lib: Add xxhash module
lib: Add zstd modules
@ -40,7 +47,7 @@ Nick Terrell (4):
fs/btrfs/props.c | 6 +
fs/btrfs/super.c | 12 +-
fs/btrfs/sysfs.c | 2 +
fs/btrfs/zstd.c | 433 ++++++
fs/btrfs/zstd.c | 435 ++++++
fs/squashfs/Kconfig | 14 +
fs/squashfs/Makefile | 1 +
fs/squashfs/decompressor.c | 7 +
@ -63,13 +70,13 @@ Nick Terrell (4):
lib/zstd/fse_compress.c | 795 ++++++++++
lib/zstd/fse_decompress.c | 332 +++++
lib/zstd/huf.h | 212 +++
lib/zstd/huf_compress.c | 771 ++++++++++
lib/zstd/huf_compress.c | 770 ++++++++++
lib/zstd/huf_decompress.c | 960 ++++++++++++
lib/zstd/mem.h | 151 ++
lib/zstd/zstd_common.c | 75 +
lib/zstd/zstd_internal.h | 269 ++++
lib/zstd/zstd_internal.h | 250 ++++
lib/zstd/zstd_opt.h | 1014 +++++++++++++
39 files changed, 14400 insertions(+), 12 deletions(-)
39 files changed, 14382 insertions(+), 12 deletions(-)
create mode 100644 fs/btrfs/zstd.c
create mode 100644 fs/squashfs/zstd_wrapper.c
create mode 100644 include/linux/xxhash.h


@ -1,7 +1,7 @@
From 5ac909c415ab4a18fd90794793c96e450795e8c6 Mon Sep 17 00:00:00 2001
From fc7f26acbabda35f1c61dfc357dbb207dc8ed23d Mon Sep 17 00:00:00 2001
From: Nick Terrell <terrelln@fb.com>
Date: Wed, 21 Jun 2017 17:37:36 -0700
Subject: [PATCH v2 1/4] lib: Add xxhash module
Date: Mon, 17 Jul 2017 17:07:18 -0700
Subject: [PATCH v3 1/4] lib: Add xxhash module
Adds xxhash kernel module with xxh32 and xxh64 hashes. xxhash is an
extremely fast non-cryptographic hash algorithm for checksumming.
@ -327,10 +327,10 @@ index 0000000..9e1f42c
+
+#endif /* XXHASH_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 0c8b78a..b6009d7 100644
index 6762529..5e7541f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -184,6 +184,9 @@ config CRC8
@@ -192,6 +192,9 @@ config CRC8
when they need to do cyclic redundancy check according CRC8
algorithm. Module will be called crc8.
@ -341,10 +341,10 @@ index 0c8b78a..b6009d7 100644
bool
depends on AUDIT && !AUDIT_ARCH
diff --git a/lib/Makefile b/lib/Makefile
index 0166fbc..1338226 100644
index 40c1837..d06b68a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
@@ -102,6 +102,7 @@ obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o


@ -1,7 +1,7 @@
From d2626127c6d6e60e940dd9a3ed58323bdcdc4930 Mon Sep 17 00:00:00 2001
From 686a6149b98250d66b5951e3ae05e79063e9de98 Mon Sep 17 00:00:00 2001
From: Nick Terrell <terrelln@fb.com>
Date: Tue, 16 May 2017 14:55:36 -0700
Subject: [PATCH v2 2/4] lib: Add zstd modules
Date: Mon, 17 Jul 2017 17:08:19 -0700
Subject: [PATCH v3 2/4] lib: Add zstd modules
Add zstd compression and decompression kernel modules.
zstd offers a wide variety of compression speed and quality trade-offs.
@ -110,6 +110,10 @@ v1 -> v2:
HUF_compressWeights(), HUF_readDTableX2(), and HUF_readDTableX4()
- No function uses more than 400 B of stack space
v2 -> v3:
- Work around gcc-7 bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388
- Fix bug in dictionary compression from upstream commit cc1522351f
include/linux/zstd.h | 1157 +++++++++++++++
lib/Kconfig | 8 +
lib/Makefile | 2 +
@ -123,13 +127,13 @@ v1 -> v2:
lib/zstd/fse_compress.c | 795 +++++++++++
lib/zstd/fse_decompress.c | 332 +++++
lib/zstd/huf.h | 212 +++
lib/zstd/huf_compress.c | 771 ++++++++++
lib/zstd/huf_compress.c | 770 ++++++++++
lib/zstd/huf_decompress.c | 960 +++++++++++++
lib/zstd/mem.h | 151 ++
lib/zstd/zstd_common.c | 75 +
lib/zstd/zstd_internal.h | 269 ++++
lib/zstd/zstd_internal.h | 250 ++++
lib/zstd/zstd_opt.h | 1014 +++++++++++++
19 files changed, 13014 insertions(+)
19 files changed, 12994 insertions(+)
create mode 100644 include/linux/zstd.h
create mode 100644 lib/zstd/Makefile
create mode 100644 lib/zstd/bitstream.h
@ -1312,10 +1316,10 @@ index 0000000..249575e
+
+#endif /* ZSTD_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index b6009d7..f00ddab 100644
index 5e7541f..0d49ed0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -241,6 +241,14 @@ config LZ4HC_COMPRESS
@@ -249,6 +249,14 @@ config LZ4HC_COMPRESS
config LZ4_DECOMPRESS
tristate
@ -1331,7 +1335,7 @@ index b6009d7..f00ddab 100644
#
diff --git a/lib/Makefile b/lib/Makefile
index 1338226..4fcef16 100644
index d06b68a..d5c8a4f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -116,6 +116,8 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
@ -10012,10 +10016,10 @@ index 0000000..2143da2
+#endif /* HUF_H_298734234 */
diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
new file mode 100644
index 0000000..0361f38
index 0000000..40055a7
--- /dev/null
+++ b/lib/zstd/huf_compress.c
@@ -0,0 +1,771 @@
@@ -0,0 +1,770 @@
+/*
+ * Huffman encoder, part of New Generation Entropy library
+ * Copyright (C) 2013-2016, Yann Collet.
@ -10543,7 +10547,7 @@ index 0000000..0361f38
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+#define HUF_FLUSHBITS(s) BIT_flushBits(s)
+
+#define HUF_FLUSHBITS_1(stream) \
+ if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
@ -10560,7 +10564,6 @@ index 0000000..0361f38
+ BYTE *const oend = ostart + dstSize;
+ BYTE *op = ostart;
+ size_t n;
+ const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
+ BIT_CStream_t bitC;
+
+ /* init */
@ -11993,10 +11996,10 @@ index 0000000..a282624
+}
diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
new file mode 100644
index 0000000..6748719
index 0000000..f0ba474
--- /dev/null
+++ b/lib/zstd/zstd_internal.h
@@ -0,0 +1,269 @@
@@ -0,0 +1,250 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
@ -12125,35 +12128,16 @@ index 0000000..6748719
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void *dst, const void *src) { memcpy(dst, src, 8); }
+#define COPY8(d, s) \
+ { \
+ ZSTD_copy8(d, s); \
+ d += 8; \
+ s += 8; \
+ }
+
+static void ZSTD_copy8(void *dst, const void *src) {
+ memcpy(dst, src, 8);
+}
+/*! ZSTD_wildcopy() :
+* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
+{
+ const BYTE *ip = (const BYTE *)src;
+ BYTE *op = (BYTE *)dst;
+ BYTE *const oend = op + length;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+}
+
+ZSTD_STATIC void ZSTD_wildcopy_e(void *dst, const void *src, void *dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */
+{
+ const BYTE *ip = (const BYTE *)src;
+ BYTE *op = (BYTE *)dst;
+ BYTE *const oend = (BYTE *)dstEnd;
+ do
+ COPY8(op, ip)
+ while (op < oend);
+ if (length > 0)
+ memcpy(dst, src, length);
+}
+
+/*-*******************************************


@ -1,7 +1,7 @@
From 599f8f2aaace3df939cb145368574a52268d82d0 Mon Sep 17 00:00:00 2001
From b0ef8fc63c9ca251ceca632f53aa1de8f1f17772 Mon Sep 17 00:00:00 2001
From: Nick Terrell <terrelln@fb.com>
Date: Wed, 21 Jun 2017 17:31:39 -0700
Subject: [PATCH v2 3/4] btrfs: Add zstd support
Date: Mon, 17 Jul 2017 17:08:39 -0700
Subject: [PATCH v3 3/4] btrfs: Add zstd support
Add zstd compression and decompression support to BtrFS. zstd at its
fastest level compresses almost as well as zlib, while offering much
@ -63,6 +63,10 @@ zstd source repository: https://github.com/facebook/zstd
Signed-off-by: Nick Terrell <terrelln@fb.com>
---
v2 -> v3:
- Port upstream BtrFS commits e1ddce71d6, 389a6cfc2a, and 6acafd1eff
- Change default compression level for BtrFS to 3
fs/btrfs/Kconfig | 2 +
fs/btrfs/Makefile | 2 +-
fs/btrfs/compression.c | 1 +
@ -73,9 +77,9 @@ Signed-off-by: Nick Terrell <terrelln@fb.com>
fs/btrfs/props.c | 6 +
fs/btrfs/super.c | 12 +-
fs/btrfs/sysfs.c | 2 +
fs/btrfs/zstd.c | 433 +++++++++++++++++++++++++++++++++++++++++++++
fs/btrfs/zstd.c | 435 +++++++++++++++++++++++++++++++++++++++++++++
include/uapi/linux/btrfs.h | 8 +-
12 files changed, 469 insertions(+), 12 deletions(-)
12 files changed, 471 insertions(+), 12 deletions(-)
create mode 100644 fs/btrfs/zstd.c
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
@ -105,10 +109,10 @@ index 128ce17..962a95a 100644
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
uuid-tree.o props.o hash.o free-space-tree.o
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 10e6b28..3beb0d0 100644
index d2ef9ac..4ff42d1 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -761,6 +761,7 @@ static struct {
@@ -704,6 +704,7 @@ static struct {
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zlib_compress,
&btrfs_lzo_compress,
@ -117,10 +121,10 @@ index 10e6b28..3beb0d0 100644
void __init btrfs_init_compress(void)
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 39ec43a..d99fc21 100644
index 87f6d33..2269e00 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -60,8 +60,9 @@ enum btrfs_compression_type {
@@ -99,8 +99,9 @@ enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
BTRFS_COMPRESS_ZLIB = 1,
BTRFS_COMPRESS_LZO = 2,
@ -132,7 +136,7 @@ index 39ec43a..d99fc21 100644
};
struct btrfs_compress_op {
@@ -92,5 +93,6 @@ struct btrfs_compress_op {
@@ -128,5 +129,6 @@ struct btrfs_compress_op {
extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;
@ -140,10 +144,10 @@ index 39ec43a..d99fc21 100644
#endif
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4f8f75d..61dd3dd 100644
index 3f3eb7b..845d77c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -271,6 +271,7 @@ struct btrfs_super_block {
@@ -270,6 +270,7 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
@ -152,10 +156,10 @@ index 4f8f75d..61dd3dd 100644
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5f678dc..49c0e91 100644
index 080e2eb..04632f4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2831,6 +2831,8 @@ int open_ctree(struct super_block *sb,
@@ -2828,6 +2828,8 @@ int open_ctree(struct super_block *sb,
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
@ -165,7 +169,7 @@ index 5f678dc..49c0e91 100644
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
btrfs_info(fs_info, "has skinny extents");
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index e176375..f732cfd 100644
index fa1b78c..b9963d9 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -327,8 +327,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
@ -180,7 +184,7 @@ index e176375..f732cfd 100644
ret = btrfs_set_prop(inode, "btrfs.compression",
comp, strlen(comp), 0);
if (ret)
@@ -1463,6 +1465,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
@@ -1466,6 +1468,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (range->compress_type == BTRFS_COMPRESS_LZO) {
btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
@ -190,10 +194,10 @@ index e176375..f732cfd 100644
ret = defrag_count;
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index d6cb155..162105f 100644
index 4b23ae5..20631e9 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -383,6 +383,8 @@ static int prop_compression_validate(const char *value, size_t len)
@@ -390,6 +390,8 @@ static int prop_compression_validate(const char *value, size_t len)
return 0;
else if (!strncmp("zlib", value, len))
return 0;
@ -202,7 +206,7 @@ index d6cb155..162105f 100644
return -EINVAL;
}
@@ -405,6 +407,8 @@ static int prop_compression_apply(struct inode *inode,
@@ -412,6 +414,8 @@ static int prop_compression_apply(struct inode *inode,
type = BTRFS_COMPRESS_LZO;
else if (!strncmp("zlib", value, len))
type = BTRFS_COMPRESS_ZLIB;
@ -211,7 +215,7 @@ index d6cb155..162105f 100644
else
return -EINVAL;
@@ -422,6 +426,8 @@ static const char *prop_compression_extract(struct inode *inode)
@@ -429,6 +433,8 @@ static const char *prop_compression_extract(struct inode *inode)
return "zlib";
case BTRFS_COMPRESS_LZO:
return "lzo";
@ -221,7 +225,7 @@ index d6cb155..162105f 100644
return NULL;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4f1cdd5..4f792d5 100644
index 12540b6..c370dea 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -513,6 +513,14 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
@ -239,7 +243,7 @@ index 4f1cdd5..4f792d5 100644
} else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no";
btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -1240,8 +1248,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1227,8 +1235,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (btrfs_test_opt(info, COMPRESS)) {
if (info->compress_type == BTRFS_COMPRESS_ZLIB)
compress_type = "zlib";
@ -252,7 +256,7 @@ index 4f1cdd5..4f792d5 100644
seq_printf(seq, ",compress-force=%s", compress_type);
else
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 1f157fb..b0dec90 100644
index c2d5f35..2b6d37c 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -200,6 +200,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF);
@ -273,10 +277,10 @@ index 1f157fb..b0dec90 100644
BTRFS_FEAT_ATTR_PTR(raid56),
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
new file mode 100644
index 0000000..838741b
index 0000000..1822068
--- /dev/null
+++ b/fs/btrfs/zstd.c
@@ -0,0 +1,433 @@
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
@ -308,10 +312,11 @@ index 0000000..838741b
+
+#define ZSTD_BTRFS_MAX_WINDOWLOG 17
+#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
+#define ZSTD_BTRFS_DEFAULT_LEVEL 3
+
+static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len)
+{
+ ZSTD_parameters params = ZSTD_getParams(1, src_len, 0);
+ ZSTD_parameters params = ZSTD_getParams(ZSTD_BTRFS_DEFAULT_LEVEL, src_len, 0);
+
+ if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
+ params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
@ -330,7 +335,7 @@ index 0000000..838741b
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+
+ vfree(workspace->mem);
+ kvfree(workspace->mem);
+ kfree(workspace->buf);
+ kfree(workspace);
+}
@ -341,15 +346,15 @@ index 0000000..838741b
+ zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT);
+ struct workspace *workspace;
+
+ workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
+ workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
+ if (!workspace)
+ return ERR_PTR(-ENOMEM);
+
+ workspace->size = max_t(size_t,
+ ZSTD_CStreamWorkspaceBound(params.cParams),
+ ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+ workspace->mem = vmalloc(workspace->size);
+ workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
+ workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
+ workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!workspace->mem || !workspace->buf)
+ goto fail;
+
@ -541,12 +546,13 @@ index 0000000..838741b
+ return ret;
+}
+
+static int zstd_decompress_bio(struct list_head *ws, struct page **pages_in,
+ u64 disk_start,
+ struct bio *orig_bio,
+ size_t srclen)
+static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct page **pages_in = cb->compressed_pages;
+ u64 disk_start = cb->start;
+ struct bio *orig_bio = cb->orig_bio;
+ size_t srclen = cb->compressed_len;
+ ZSTD_DStream *stream;
+ int ret = 0;
+ unsigned long page_in_index = 0;
@ -711,7 +717,7 @@ index 0000000..838741b
+ .decompress = zstd_decompress,
+};
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index a456e53..992c150 100644
index 9aa74f3..378230c 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -255,13 +255,7 @@ struct btrfs_ioctl_fs_info_args {


@ -1,7 +1,7 @@
From 5ff6a64abaea7b7f11d37cb0fdf08642316a3a90 Mon Sep 17 00:00:00 2001
From 0cd63464d182bb9708f8b25f7da3dc8e5ec6b4fa Mon Sep 17 00:00:00 2001
From: Nick Terrell <terrelln@fb.com>
Date: Mon, 12 Jun 2017 12:18:23 -0700
Subject: [PATCH v2 4/4] squashfs: Add zstd support
Date: Mon, 17 Jul 2017 17:08:59 -0700
Subject: [PATCH v3 4/4] squashfs: Add zstd support
Add zstd compression and decompression support to SquashFS. zstd is a
great fit for SquashFS because it can compress at ratios approaching xz,


@ -29,10 +29,11 @@
#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len)
{
ZSTD_parameters params = ZSTD_getParams(1, src_len, 0);
ZSTD_parameters params = ZSTD_getParams(ZSTD_BTRFS_DEFAULT_LEVEL, src_len, 0);
if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
@ -51,7 +52,7 @@ static void zstd_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
vfree(workspace->mem);
kvfree(workspace->mem);
kfree(workspace->buf);
kfree(workspace);
}
@ -62,15 +63,15 @@ static struct list_head *zstd_alloc_workspace(void)
zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT);
struct workspace *workspace;
workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
if (!workspace)
return ERR_PTR(-ENOMEM);
workspace->size = max_t(size_t,
ZSTD_CStreamWorkspaceBound(params.cParams),
ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
workspace->mem = vmalloc(workspace->size);
workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!workspace->mem || !workspace->buf)
goto fail;
@ -262,12 +263,13 @@ out:
return ret;
}
static int zstd_decompress_bio(struct list_head *ws, struct page **pages_in,
u64 disk_start,
struct bio *orig_bio,
size_t srclen)
static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct page **pages_in = cb->compressed_pages;
u64 disk_start = cb->start;
struct bio *orig_bio = cb->orig_bio;
size_t srclen = cb->compressed_len;
ZSTD_DStream *stream;
int ret = 0;
unsigned long page_in_index = 0;


@ -525,7 +525,7 @@ static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
#define HUF_FLUSHBITS(s) BIT_flushBits(s)
#define HUF_FLUSHBITS_1(stream) \
if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
@ -542,7 +542,6 @@ size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, si
BYTE *const oend = ostart + dstSize;
BYTE *op = ostart;
size_t n;
const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
BIT_CStream_t bitC;
/* init */


@ -126,35 +126,16 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
/*-*******************************************
* Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void *dst, const void *src) { memcpy(dst, src, 8); }
#define COPY8(d, s) \
{ \
ZSTD_copy8(d, s); \
d += 8; \
s += 8; \
}
static void ZSTD_copy8(void *dst, const void *src) {
memcpy(dst, src, 8);
}
/*! ZSTD_wildcopy() :
* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
#define WILDCOPY_OVERLENGTH 8
ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
{
const BYTE *ip = (const BYTE *)src;
BYTE *op = (BYTE *)dst;
BYTE *const oend = op + length;
do
COPY8(op, ip)
while (op < oend);
}
ZSTD_STATIC void ZSTD_wildcopy_e(void *dst, const void *src, void *dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */
{
const BYTE *ip = (const BYTE *)src;
BYTE *op = (BYTE *)dst;
BYTE *const oend = (BYTE *)dstEnd;
do
COPY8(op, ip)
while (op < oend);
if (length > 0)
memcpy(dst, src, length);
}
/*-*******************************************


@ -0,0 +1,37 @@
# ################################################################
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# ################################################################
# This Makefile presumes libzstd is installed, using `sudo make install`
CPPFLAGS+= -I../../lib/common
CFLAGS ?= -O3
DEBUGFLAGS = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
-Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
-Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security \
-Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \
-Wredundant-decls
CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS)
FLAGS = $(CPPFLAGS) $(CFLAGS)
LDFLAGS += -lzstd
.PHONY: default all clean
default: all
all: ldm
ldm: ldm_common.c ldm.c main.c
$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@
clean:
@rm -f core *.o tmp* result* *.ldm *.ldm.dec \
ldm
@echo Cleaning completed


@ -0,0 +1,102 @@
This is a compression algorithm focused on finding long distance matches.
It is based upon lz4 and uses nearly the same block format (github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md). The offset is encoded in four bytes instead of lz4's two, to accommodate the longer match distances. The block format is described in `ldm.h`.
### Build
Run `make`.
### Compressing a file
`ldm <filename>`
Decompression and verification can be enabled by defining `DECOMPRESS_AND_VERIFY` in `main.c`.
The output file names are as follows:
- `<filename>.ldm` : compressed file
- `<filename>.ldm.dec` : decompressed file
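
For programmatic use, the API declared in `ldm.h` can be exercised directly. Below is a minimal round-trip sketch (a hypothetical `ldm_roundTrip` helper, not part of this directory); since `LDM_compress` currently ignores `maxDstSize`, the destination buffer is sized generously:

```c
#include <stdlib.h>
#include <string.h>
#include "ldm.h"

/* Hypothetical helper: compress src, prepend the 16-byte header, then
 * decompress and verify. Returns 1 on a successful round trip. */
static int ldm_roundTrip(const void *src, size_t srcSize) {
    size_t const dstCapacity = 2 * srcSize + LDM_HEADER_SIZE + 512;
    BYTE *const compressed = malloc(dstCapacity);
    BYTE *const decompressed = malloc(srcSize);
    int ok = 0;
    if (compressed && decompressed) {
        size_t const cSize = LDM_compress(
            src, srcSize, compressed + LDM_HEADER_SIZE,
            dstCapacity - LDM_HEADER_SIZE);
        U64 cCheck, dCheck;
        LDM_writeHeader(compressed, cSize, srcSize);
        LDM_readHeader(compressed, &cCheck, &dCheck);
        ok = cSize > 0 &&
             LDM_decompress(compressed + LDM_HEADER_SIZE, (size_t)cCheck,
                            decompressed, (size_t)dCheck) == srcSize &&
             memcmp(src, decompressed, srcSize) == 0;
    }
    free(compressed);
    free(decompressed);
    return ok;
}
```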
### Parameters
There are various parameters that can be tuned, either in `ldm.h` or, if `ldm_params.h` is included, in `ldm_params.h` (for easier configuration).
The parameters are as follows and must all be defined:
- `LDM_MEMORY_USAGE` : the memory usage of the underlying hash table in bytes.
- `HASH_BUCKET_SIZE_LOG` : the log size of each bucket in the hash table (used in collision resolution).
- `LDM_LAG` : the lag (in bytes) in inserting entries into the hash table.
- `LDM_WINDOW_SIZE_LOG` : the log maximum window size when searching for matches.
- `LDM_MIN_MATCH_LENGTH` : the minimum match length.
- `INSERT_BY_TAG` : insert entries into the hash table as a function of the hash. This increases speed by reducing the number of hash table lookups and match comparisons. Certain hashes will never be inserted.
- `USE_CHECKSUM` : store a checksum with the hash table entries for faster comparison. This halves the number of entries the hash table can contain.
The optional parameter `HASH_ONLY_EVERY_LOG` is the log inverse frequency of insertion into the hash table. That is, an entry is inserted approximately every `1 << HASH_ONLY_EVERY_LOG` times. If this parameter is not defined, the value is computed as a function of the window size and memory usage to approximate an even coverage of the window.
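
For reference, `ldm.c` computes this default as follows (where `LDM_HASH_ENTRY_SIZE_LOG` is 3 with `USE_CHECKSUM` and 2 without):

```c
#ifndef HASH_ONLY_EVERY_LOG
#define HASH_ONLY_EVERY_LOG \
    (LDM_WINDOW_SIZE_LOG - ((LDM_MEMORY_USAGE) - (LDM_HASH_ENTRY_SIZE_LOG)))
#endif
// An entry is then inserted roughly once every HASH_ONLY_EVERY + 1 positions.
#define HASH_ONLY_EVERY ((1 << (HASH_ONLY_EVERY_LOG)) - 1)
```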
### Benchmark
Below is a comparison of various compression methods on a tar of four versions of llvm (versions `3.9.0`, `3.9.1`, `4.0.0`, `4.0.1`) with a total size of `727900160` B.
| Method | Size | Ratio |
|:---|---:|---:|
|lrzip -p 32 -n -w 1 | `369968714` | `1.97`|
|ldm | `209391361` | `3.48`|
|lz4 | `189954338` | `3.83`|
|lrzip -p 32 -l -w 1 | `163940343` | `4.44`|
|zstd -1 | `126080293` | `5.77`|
|lrzip -p 32 -n | `124821009` | `5.83`|
|lrzip -p 32 -n -w 1 & zstd -1 | `120317909` | `6.05`|
|zstd -3 -o | `115290952` | `6.31`|
|lrzip -p 32 -g -L 9 -w 1 | `107168979` | `6.79`|
|zstd -6 -o | `102772098` | `7.08`|
|zstd -T16 -9 | `98040470` | `7.42`|
|lrzip -p 32 -n -w 1 & zstd -T32 -19 | `88050289` | `8.27`|
|zstd -T32 -19 | `83626098` | `8.70`|
|lrzip -p 32 -n & zstd -1 | `36335117` | `20.03`|
|ldm & zstd -6 | `32856232` | `22.15`|
|lrzip -p 32 -g -L 9 | `32243594` | `22.58`|
|lrzip -p 32 -n & zstd -6 | `30954572` | `23.52`|
|lrzip -p 32 -n & zstd -T32 -19 | `26472064` | `27.50`|
The method marked `ldm` was run with the following parameters:
| Parameter | Value |
|:---|---:|
| `LDM_MEMORY_USAGE` | `23`|
|`HASH_BUCKET_SIZE_LOG` | `3`|
|`LDM_LAG` | `0`|
|`LDM_WINDOW_SIZE_LOG` | `28`|
|`LDM_MIN_MATCH_LENGTH`| `64`|
|`INSERT_BY_TAG` | `1`|
|`USE_CHECKSUM` | `1`|
The compression speed was `220.5 MB/s`.
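
These are the same values as in the `ldm_params.h` included in this directory:

```c
#define LDM_MEMORY_USAGE 23
#define HASH_BUCKET_SIZE_LOG 3
#define LDM_LAG 0
#define LDM_WINDOW_SIZE_LOG 28
#define LDM_MIN_MATCH_LENGTH 64
#define INSERT_BY_TAG 1
#define USE_CHECKSUM 1
```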
### Parameter selection
Below is a brief discussion of the effects of the parameters on the speed and compression ratio.
#### Speed
A large speed bottleneck is finding matches and checking whether they meet the minimum match length. Generally:
- The fewer matches found (or the lower the percentage of the literals matched), the slower the algorithm will behave.
- Increasing `HASH_ONLY_EVERY_LOG` results in fewer inserts and, if `INSERT_BY_TAG` is set, fewer lookups in the table. This has a large effect on speed, as well as compression ratio.
- If `HASH_ONLY_EVERY_LOG` is not set, its value is calculated based on `LDM_WINDOW_SIZE_LOG` and `LDM_MEMORY_USAGE`. Increasing `LDM_WINDOW_SIZE_LOG` raises `HASH_ONLY_EVERY_LOG`, while increasing `LDM_MEMORY_USAGE` lowers it.
- `USE_CHECKSUM` generally improves speed by making hash table comparisons cheaper.
#### Compression ratio
The compression ratio is highly correlated with the coverage of matches. As a long distance matcher, the algorithm was designed to "optimize" for long distance matches outside the zstd compression window. The compression ratio after recompressing the output of the long-distance matcher with zstd was a more important signal in development than the raw compression ratio itself.
Generally, increasing `LDM_MEMORY_USAGE` will improve the compression ratio. However, when using the default computed value of `HASH_ONLY_EVERY_LOG`, this increases the frequency of insertion and lookup in the table and thus may result in a decrease in speed.
Below is a table showing the speed and compression ratio when compressing the llvm tar (as described above) using different settings for `LDM_MEMORY_USAGE`. The other parameters were the same as used in the benchmark above.
| `LDM_MEMORY_USAGE` | Ratio | Speed (MB/s) | Ratio after zstd -6 |
|---:| ---: | ---: | ---: |
| `18` | `1.85` | `232.4` | `10.92` |
| `21` | `2.79` | `233.9` | `15.92` |
| `23` | `3.48` | `220.5` | `18.29` |
| `25` | `4.56` | `140.8` | `19.21` |
### Compression statistics
Compression statistics (and the configuration) can be enabled/disabled via `COMPUTE_STATS` and `OUTPUT_CONFIGURATION` in `ldm.h`.


@ -0,0 +1,857 @@
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ldm.h"
#define LDM_HASHTABLESIZE (1 << (LDM_MEMORY_USAGE))
#define LDM_HASHTABLESIZE_U32 ((LDM_HASHTABLESIZE) >> 2)
#define LDM_HASHTABLESIZE_U64 ((LDM_HASHTABLESIZE) >> 3)
#if USE_CHECKSUM
#define LDM_HASH_ENTRY_SIZE_LOG 3
#else
#define LDM_HASH_ENTRY_SIZE_LOG 2
#endif
// On average, an entry is inserted into the table once every HASH_ONLY_EVERY + 1 positions.
#ifndef HASH_ONLY_EVERY_LOG
#define HASH_ONLY_EVERY_LOG (LDM_WINDOW_SIZE_LOG-((LDM_MEMORY_USAGE)-(LDM_HASH_ENTRY_SIZE_LOG)))
#endif
#define HASH_ONLY_EVERY ((1 << (HASH_ONLY_EVERY_LOG)) - 1)
#define HASH_BUCKET_SIZE (1 << (HASH_BUCKET_SIZE_LOG))
#define NUM_HASH_BUCKETS_LOG ((LDM_MEMORY_USAGE)-(LDM_HASH_ENTRY_SIZE_LOG)-(HASH_BUCKET_SIZE_LOG))
#define HASH_CHAR_OFFSET 10
// Take the first match in the hash bucket only.
//#define ZSTD_SKIP
static const U64 prime8bytes = 11400714785074694791ULL;
// Type of the small hash used to index into the hash table.
typedef U32 hash_t;
#if USE_CHECKSUM
typedef struct LDM_hashEntry {
U32 offset;
U32 checksum;
} LDM_hashEntry;
#else
typedef struct LDM_hashEntry {
U32 offset;
} LDM_hashEntry;
#endif
struct LDM_compressStats {
U32 windowSizeLog, hashTableSizeLog;
U32 numMatches;
U64 totalMatchLength;
U64 totalLiteralLength;
U64 totalOffset;
U32 matchLengthHistogram[32];
U32 minOffset, maxOffset;
U32 offsetHistogram[32];
};
typedef struct LDM_hashTable LDM_hashTable;
struct LDM_CCtx {
size_t isize; /* Input size */
size_t maxOSize; /* Maximum output size */
const BYTE *ibase; /* Base of input */
const BYTE *ip; /* Current input position */
const BYTE *iend; /* End of input */
// Maximum input position such that hashing at the position does not exceed
// end of input.
const BYTE *ihashLimit;
// Maximum input position such that finding a match of at least the minimum
// match length does not exceed end of input.
const BYTE *imatchLimit;
const BYTE *obase; /* Base of output */
BYTE *op; /* Output */
const BYTE *anchor; /* Anchor to start of current (match) block */
LDM_compressStats stats; /* Compression statistics */
LDM_hashTable *hashTable;
const BYTE *lastPosHashed; /* Last position hashed */
U64 lastHash;
const BYTE *nextIp; // TODO: this is redundant (ip + step)
const BYTE *nextPosHashed;
U64 nextHash;
unsigned step; // ip step, should be 1.
const BYTE *lagIp;
U64 lagHash;
};
struct LDM_hashTable {
U32 numBuckets; // The number of buckets.
U32 numEntries; // numBuckets * HASH_BUCKET_SIZE.
LDM_hashEntry *entries;
BYTE *bucketOffsets; // A pointer (per bucket) to the next insert position.
};
static void HASH_destroyTable(LDM_hashTable *table) {
free(table->entries);
free(table->bucketOffsets);
free(table);
}
/**
* Create a hash table that can contain size elements.
* The number of buckets is determined by size >> HASH_BUCKET_SIZE_LOG.
*
* Returns NULL if table creation failed.
*/
static LDM_hashTable *HASH_createTable(U32 size) {
LDM_hashTable *table = malloc(sizeof(LDM_hashTable));
if (!table) return NULL;
table->numBuckets = size >> HASH_BUCKET_SIZE_LOG;
table->numEntries = size;
table->entries = calloc(size, sizeof(LDM_hashEntry));
table->bucketOffsets = calloc(size >> HASH_BUCKET_SIZE_LOG, sizeof(BYTE));
if (!table->entries || !table->bucketOffsets) {
HASH_destroyTable(table);
return NULL;
}
return table;
}
static LDM_hashEntry *getBucket(const LDM_hashTable *table, const hash_t hash) {
return table->entries + (hash << HASH_BUCKET_SIZE_LOG);
}
static unsigned ZSTD_NbCommonBytes (register size_t val) {
if (MEM_isLittleEndian()) {
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
unsigned long r = 0;
_BitScanForward64( &r, (U64)val );
return (unsigned)(r>>3);
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_ctzll((U64)val) >> 3);
# else
static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
0, 3, 1, 3, 1, 4, 2, 7,
0, 2, 3, 6, 1, 5, 3, 5,
1, 3, 4, 4, 2, 5, 6, 7,
7, 0, 1, 2, 3, 3, 4, 6,
2, 6, 5, 5, 3, 4, 5, 6,
7, 1, 2, 4, 6, 4, 4, 5,
7, 2, 6, 5, 7, 6, 7, 7 };
return DeBruijnBytePos[
((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r=0;
_BitScanForward( &r, (U32)val );
return (unsigned)(r>>3);
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_ctz((U32)val) >> 3);
# else
static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
3, 2, 2, 1, 3, 2, 0, 1,
3, 3, 1, 2, 2, 2, 2, 0,
3, 1, 2, 0, 1, 0, 1, 1 };
return DeBruijnBytePos[
((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
}
} else { /* Big Endian CPU */
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
unsigned long r = 0;
_BitScanReverse64( &r, val );
return (unsigned)(r>>3);
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_clzll(val) >> 3);
# else
unsigned r;
/* calculate this way due to compiler complaining in 32-bits mode */
const unsigned n32 = sizeof(size_t)*4;
if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
r += (!val);
return r;
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r = 0;
_BitScanReverse( &r, (unsigned long)val );
return (unsigned)(r>>3);
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_clz((U32)val) >> 3);
# else
unsigned r;
if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
r += (!val);
return r;
# endif
}
}
}
// From lib/compress/zstd_compress.c
static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch,
const BYTE *const pInLimit) {
const BYTE * const pStart = pIn;
const BYTE * const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
while (pIn < pInLoopLimit) {
size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
if (!diff) {
pIn += sizeof(size_t);
pMatch += sizeof(size_t);
continue;
}
pIn += ZSTD_NbCommonBytes(diff);
return (size_t)(pIn - pStart);
}
if (MEM_64bits()) {
if ((pIn < (pInLimit - 3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) {
pIn += 4;
pMatch += 4;
}
}
if ((pIn < (pInLimit - 1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) {
pIn += 2;
pMatch += 2;
}
if ((pIn < pInLimit) && (*pMatch == *pIn)) {
pIn++;
}
return (size_t)(pIn - pStart);
}
/**
* Count number of bytes that match backwards before pIn and pMatch.
*
* We count only bytes where pMatch > pBase and pIn > pAnchor.
*/
static size_t countBackwardsMatch(const BYTE *pIn, const BYTE *pAnchor,
const BYTE *pMatch, const BYTE *pBase) {
size_t matchLength = 0;
while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
pIn--;
pMatch--;
matchLength++;
}
return matchLength;
}
/**
* Returns a pointer to the entry in the hash table matching the hash and
* checksum with the "longest match length" as defined below. The forward and
* backward match lengths are written to *pForwardMatchLength and
* *pBackwardMatchLength.
*
* The match length is defined based on cctx->ip and the entry's offset.
* The forward match is computed from cctx->ip and entry->offset + cctx->ibase.
* The backward match is computed backwards from cctx->ip and
* cctx->ibase only if the forward match is longer than LDM_MIN_MATCH_LENGTH.
*/
static LDM_hashEntry *HASH_getBestEntry(const LDM_CCtx *cctx,
const hash_t hash,
const U32 checksum,
U64 *pForwardMatchLength,
U64 *pBackwardMatchLength) {
LDM_hashTable *table = cctx->hashTable;
LDM_hashEntry *bucket = getBucket(table, hash);
LDM_hashEntry *cur;
LDM_hashEntry *bestEntry = NULL;
U64 bestMatchLength = 0;
#if !(USE_CHECKSUM)
(void)checksum;
#endif
for (cur = bucket; cur < bucket + HASH_BUCKET_SIZE; ++cur) {
const BYTE *pMatch = cur->offset + cctx->ibase;
// Check checksum for faster check.
#if USE_CHECKSUM
if (cur->checksum == checksum &&
cctx->ip - pMatch <= LDM_WINDOW_SIZE) {
#else
if (cctx->ip - pMatch <= LDM_WINDOW_SIZE) {
#endif
U64 forwardMatchLength = ZSTD_count(cctx->ip, pMatch, cctx->iend);
U64 backwardMatchLength, totalMatchLength;
// Only take matches where the forward match length is large enough
// for speed.
if (forwardMatchLength < LDM_MIN_MATCH_LENGTH) {
continue;
}
backwardMatchLength =
countBackwardsMatch(cctx->ip, cctx->anchor,
cur->offset + cctx->ibase,
cctx->ibase);
totalMatchLength = forwardMatchLength + backwardMatchLength;
if (totalMatchLength >= bestMatchLength) {
bestMatchLength = totalMatchLength;
*pForwardMatchLength = forwardMatchLength;
*pBackwardMatchLength = backwardMatchLength;
bestEntry = cur;
#ifdef ZSTD_SKIP
return cur;
#endif
}
}
}
if (bestEntry != NULL) {
return bestEntry;
}
return NULL;
}
/**
* Insert an entry into the hash table. The table uses a "circular buffer",
* with the oldest entry overwritten.
*/
static void HASH_insert(LDM_hashTable *table,
const hash_t hash, const LDM_hashEntry entry) {
*(getBucket(table, hash) + table->bucketOffsets[hash]) = entry;
table->bucketOffsets[hash]++;
table->bucketOffsets[hash] &= HASH_BUCKET_SIZE - 1;
}
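// For example, with HASH_BUCKET_SIZE_LOG == 3 each bucket holds 8 entries,
// and bucketOffsets[hash] cycles through 0..7, so the 9th insert into a
// bucket overwrites that bucket's oldest entry.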
static void HASH_outputTableOccupancy(const LDM_hashTable *table) {
U32 ctr = 0;
LDM_hashEntry *cur = table->entries;
LDM_hashEntry *end = table->entries + (table->numBuckets * HASH_BUCKET_SIZE);
for (; cur < end; ++cur) {
if (cur->offset == 0) {
ctr++;
}
}
// The number of buckets is repeated as a check for now.
printf("Num buckets, bucket size: %d (2^%d), %d\n",
table->numBuckets, NUM_HASH_BUCKETS_LOG, HASH_BUCKET_SIZE);
printf("Hash table size, empty slots, %% empty: %u, %u, %.3f\n",
table->numEntries, ctr,
100.0 * (double)(ctr) / table->numEntries);
}
// TODO: This can be done more efficiently, for example by using builtin
// functions (but it is not that important as it is only used for computing
// stats).
static int intLog2(U64 x) {
int ret = 0;
while (x >>= 1) {
ret++;
}
return ret;
}
void LDM_printCompressStats(const LDM_compressStats *stats) {
printf("=====================\n");
printf("Compression statistics\n");
printf("Window size, hash table size (bytes): 2^%u, 2^%u\n",
stats->windowSizeLog, stats->hashTableSizeLog);
printf("num matches, total match length, %% matched: %u, %llu, %.3f\n",
stats->numMatches,
stats->totalMatchLength,
100.0 * (double)stats->totalMatchLength /
(double)(stats->totalMatchLength + stats->totalLiteralLength));
printf("avg match length: %.1f\n", ((double)stats->totalMatchLength) /
(double)stats->numMatches);
printf("avg literal length, total literalLength: %.1f, %llu\n",
((double)stats->totalLiteralLength) / (double)stats->numMatches,
stats->totalLiteralLength);
printf("avg offset length: %.1f\n",
((double)stats->totalOffset) / (double)stats->numMatches);
printf("min offset, max offset: %u, %u\n",
stats->minOffset, stats->maxOffset);
printf("\n");
printf("offset histogram | match length histogram\n");
printf("offset/ML, num matches, %% of matches | num matches, %% of matches\n");
{
int i;
int logMaxOffset = intLog2(stats->maxOffset);
for (i = 0; i <= logMaxOffset; i++) {
printf("2^%*d: %10u %6.3f%% |2^%*d: %10u %6.3f \n",
2, i,
stats->offsetHistogram[i],
100.0 * (double) stats->offsetHistogram[i] /
(double) stats->numMatches,
2, i,
stats->matchLengthHistogram[i],
100.0 * (double) stats->matchLengthHistogram[i] /
(double) stats->numMatches);
}
}
printf("\n");
printf("=====================\n");
}
/**
* Return the upper (most significant) NUM_HASH_BUCKETS_LOG bits.
*/
static hash_t getSmallHash(U64 hash) {
return hash >> (64 - NUM_HASH_BUCKETS_LOG);
}
/**
* Return the 32 bits after the upper NUM_HASH_BUCKETS_LOG bits.
*/
static U32 getChecksum(U64 hash) {
return (hash >> (64 - 32 - NUM_HASH_BUCKETS_LOG)) & 0xFFFFFFFF;
}
#if INSERT_BY_TAG
static U32 lowerBitsFromHfHash(U64 hash) {
// The number of bits used so far is NUM_HASH_BUCKETS_LOG + 32.
// So there are 32 - NUM_HASH_BUCKETS_LOG bits left.
// Occasional hashing requires HASH_ONLY_EVERY_LOG bits.
// So if 32 - NUM_HASH_BUCKETS_LOG < HASH_ONLY_EVERY_LOG, just return the lower bits
// allowing for reuse of bits.
if (32 - NUM_HASH_BUCKETS_LOG < HASH_ONLY_EVERY_LOG) {
return hash & HASH_ONLY_EVERY;
} else {
// Otherwise shift by
// (32 - NUM_HASH_BUCKETS_LOG - HASH_ONLY_EVERY_LOG) bits first.
return (hash >> (32 - NUM_HASH_BUCKETS_LOG - HASH_ONLY_EVERY_LOG)) &
HASH_ONLY_EVERY;
}
}
#endif
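// Summary of how the 64-bit hash is consumed (with USE_CHECKSUM and
// INSERT_BY_TAG enabled): the top NUM_HASH_BUCKETS_LOG bits select the
// bucket, the next 32 bits form the checksum, and HASH_ONLY_EVERY_LOG of
// the remaining bits form the insertion tag.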
/**
* Get a 64-bit hash using the first len bytes from buf.
*
* Giving bytes s = s_1, s_2, ... s_k, the hash is defined to be
* H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
*
* where the constant a is defined to be prime8bytes.
*
* The implementation adds an offset to each byte, so
* H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ...
*/
static U64 getHash(const BYTE *buf, U32 len) {
U64 ret = 0;
U32 i;
for (i = 0; i < len; i++) {
ret *= prime8bytes;
ret += buf[i] + HASH_CHAR_OFFSET;
}
return ret;
}
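// Computes base^exp (mod 2^64) by exponentiation by squaring; used by
// updateHash() to form prime8bytes^(len - 1).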
static U64 ipow(U64 base, U64 exp) {
U64 ret = 1;
while (exp) {
if (exp & 1) {
ret *= base;
}
exp >>= 1;
base *= base;
}
return ret;
}
static U64 updateHash(U64 hash, U32 len,
BYTE toRemove, BYTE toAdd) {
// TODO: this relies on compiler optimization.
// The exponential can be calculated explicitly as len is constant.
hash -= ((toRemove + HASH_CHAR_OFFSET) *
ipow(prime8bytes, len - 1));
hash *= prime8bytes;
hash += toAdd + HASH_CHAR_OFFSET;
return hash;
}
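// Rolling-hash identity behind updateHash(), working mod 2^64: with
//   H(s_1..s_k) = sum_i (s_i + HASH_CHAR_OFFSET) * a^(k-i),  a = prime8bytes,
// dropping s_1 and appending s_(k+1) gives
//   H(s_2..s_(k+1)) = (H(s_1..s_k) - (s_1 + HASH_CHAR_OFFSET) * a^(k-1)) * a
//                     + (s_(k+1) + HASH_CHAR_OFFSET),
// which is exactly the subtract, multiply, add sequence above.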
/**
* Update cctx->nextHash and cctx->nextPosHashed
* based on cctx->lastHash and cctx->lastPosHashed.
*
* This uses a rolling hash and requires that the last position hashed
* corresponds to cctx->nextIp - step.
*/
static void setNextHash(LDM_CCtx *cctx) {
cctx->nextHash = updateHash(
cctx->lastHash, LDM_HASH_LENGTH,
cctx->lastPosHashed[0],
cctx->lastPosHashed[LDM_HASH_LENGTH]);
cctx->nextPosHashed = cctx->nextIp;
#if LDM_LAG
if (cctx->ip - cctx->ibase > LDM_LAG) {
cctx->lagHash = updateHash(
cctx->lagHash, LDM_HASH_LENGTH,
cctx->lagIp[0], cctx->lagIp[LDM_HASH_LENGTH]);
cctx->lagIp++;
}
#endif
}
static void putHashOfCurrentPositionFromHash(LDM_CCtx *cctx, U64 hash) {
// Hash only every HASH_ONLY_EVERY times, based on cctx->ip.
// Note: this works only when cctx->step is 1.
#if LDM_LAG
if (cctx->lagIp - cctx->ibase > 0) {
#if INSERT_BY_TAG
U32 hashEveryMask = lowerBitsFromHfHash(cctx->lagHash);
if (hashEveryMask == HASH_ONLY_EVERY) {
#else
if (((cctx->ip - cctx->ibase) & HASH_ONLY_EVERY) == HASH_ONLY_EVERY) {
#endif
U32 smallHash = getSmallHash(cctx->lagHash);
# if USE_CHECKSUM
U32 checksum = getChecksum(cctx->lagHash);
const LDM_hashEntry entry = { cctx->lagIp - cctx->ibase, checksum };
# else
const LDM_hashEntry entry = { cctx->lagIp - cctx->ibase };
# endif
HASH_insert(cctx->hashTable, smallHash, entry);
}
} else {
#endif // LDM_LAG
#if INSERT_BY_TAG
U32 hashEveryMask = lowerBitsFromHfHash(hash);
if (hashEveryMask == HASH_ONLY_EVERY) {
#else
if (((cctx->ip - cctx->ibase) & HASH_ONLY_EVERY) == HASH_ONLY_EVERY) {
#endif
U32 smallHash = getSmallHash(hash);
#if USE_CHECKSUM
U32 checksum = getChecksum(hash);
const LDM_hashEntry entry = { cctx->ip - cctx->ibase, checksum };
#else
const LDM_hashEntry entry = { cctx->ip - cctx->ibase };
#endif
HASH_insert(cctx->hashTable, smallHash, entry);
}
#if LDM_LAG
}
#endif
cctx->lastPosHashed = cctx->ip;
cctx->lastHash = hash;
}
/**
* Copy over the cctx->lastHash, and cctx->lastPosHashed
* fields from the "next" fields.
*
* This requires that cctx->ip == cctx->nextPosHashed.
*/
static void LDM_updateLastHashFromNextHash(LDM_CCtx *cctx) {
putHashOfCurrentPositionFromHash(cctx, cctx->nextHash);
}
/**
* Insert hash of the current position into the hash table.
*/
static void LDM_putHashOfCurrentPosition(LDM_CCtx *cctx) {
U64 hash = getHash(cctx->ip, LDM_HASH_LENGTH);
putHashOfCurrentPositionFromHash(cctx, hash);
}
size_t LDM_initializeCCtx(LDM_CCtx *cctx,
const void *src, size_t srcSize,
void *dst, size_t maxDstSize) {
cctx->isize = srcSize;
cctx->maxOSize = maxDstSize;
cctx->ibase = (const BYTE *)src;
cctx->ip = cctx->ibase;
cctx->iend = cctx->ibase + srcSize;
cctx->ihashLimit = cctx->iend - LDM_HASH_LENGTH;
cctx->imatchLimit = cctx->iend - LDM_MIN_MATCH_LENGTH;
cctx->obase = (BYTE *)dst;
cctx->op = (BYTE *)dst;
cctx->anchor = cctx->ibase;
memset(&(cctx->stats), 0, sizeof(cctx->stats));
#if USE_CHECKSUM
cctx->hashTable = HASH_createTable(LDM_HASHTABLESIZE_U64);
#else
cctx->hashTable = HASH_createTable(LDM_HASHTABLESIZE_U32);
#endif
if (!cctx->hashTable) return 1;
cctx->stats.minOffset = UINT_MAX;
cctx->stats.windowSizeLog = LDM_WINDOW_SIZE_LOG;
cctx->stats.hashTableSizeLog = LDM_MEMORY_USAGE;
cctx->lastPosHashed = NULL;
cctx->step = 1; // Fixed to be 1 for now. Changing may break things.
cctx->nextIp = cctx->ip + cctx->step;
cctx->nextPosHashed = 0;
return 0;
}
void LDM_destroyCCtx(LDM_CCtx *cctx) {
HASH_destroyTable(cctx->hashTable);
}
/**
* Finds the "best" match.
*
* Returns 0 if successful and 1 otherwise (i.e. no match can be found
* in the remaining input that is long enough).
*
* forwardMatchLength contains the forward length of the match.
*/
static int LDM_findBestMatch(LDM_CCtx *cctx, const BYTE **match,
U64 *forwardMatchLength, U64 *backwardMatchLength) {
LDM_hashEntry *entry = NULL;
cctx->nextIp = cctx->ip + cctx->step;
while (entry == NULL) {
U64 hash;
hash_t smallHash;
U32 checksum;
#if INSERT_BY_TAG
U32 hashEveryMask;
#endif
setNextHash(cctx);
hash = cctx->nextHash;
smallHash = getSmallHash(hash);
checksum = getChecksum(hash);
#if INSERT_BY_TAG
hashEveryMask = lowerBitsFromHfHash(hash);
#endif
cctx->ip = cctx->nextIp;
cctx->nextIp += cctx->step;
if (cctx->ip > cctx->imatchLimit) {
return 1;
}
#if INSERT_BY_TAG
if (hashEveryMask == HASH_ONLY_EVERY) {
entry = HASH_getBestEntry(cctx, smallHash, checksum,
forwardMatchLength, backwardMatchLength);
}
#else
entry = HASH_getBestEntry(cctx, smallHash, checksum,
forwardMatchLength, backwardMatchLength);
#endif
if (entry != NULL) {
*match = entry->offset + cctx->ibase;
}
putHashOfCurrentPositionFromHash(cctx, hash);
}
setNextHash(cctx);
return 0;
}
void LDM_encodeLiteralLengthAndLiterals(
LDM_CCtx *cctx, BYTE *pToken, const U64 literalLength) {
/* Encode the literal length. */
if (literalLength >= RUN_MASK) {
U64 len = (U64)literalLength - RUN_MASK;
*pToken = (RUN_MASK << ML_BITS);
for (; len >= 255; len -= 255) {
*(cctx->op)++ = 255;
}
*(cctx->op)++ = (BYTE)len;
} else {
*pToken = (BYTE)(literalLength << ML_BITS);
}
/* Encode the literals. */
memcpy(cctx->op, cctx->anchor, literalLength);
cctx->op += literalLength;
}
void LDM_outputBlock(LDM_CCtx *cctx,
const U64 literalLength,
const U32 offset,
const U64 matchLength) {
BYTE *pToken = cctx->op++;
/* Encode the literal length and literals. */
LDM_encodeLiteralLengthAndLiterals(cctx, pToken, literalLength);
/* Encode the offset. */
MEM_write32(cctx->op, offset);
cctx->op += LDM_OFFSET_SIZE;
/* Encode the match length. */
if (matchLength >= ML_MASK) {
U64 matchLengthRemaining = matchLength;
*pToken += ML_MASK;
matchLengthRemaining -= ML_MASK;
MEM_write32(cctx->op, 0xFFFFFFFF);
while (matchLengthRemaining >= 4*0xFF) {
cctx->op += 4;
MEM_write32(cctx->op, 0xffffffff);
matchLengthRemaining -= 4*0xFF;
}
cctx->op += matchLengthRemaining / 255;
*(cctx->op)++ = (BYTE)(matchLengthRemaining % 255);
} else {
*pToken += (BYTE)(matchLength);
}
}
// TODO: maxDstSize is unused. This function may seg fault when writing
// beyond the size of dst, as it does not check maxDstSize. Writing to
// a buffer and performing checks is a possible solution.
//
// This is based upon lz4.
size_t LDM_compress(const void *src, size_t srcSize,
void *dst, size_t maxDstSize) {
LDM_CCtx cctx;
const BYTE *match = NULL;
U64 forwardMatchLength = 0;
U64 backwardsMatchLength = 0;
if (LDM_initializeCCtx(&cctx, src, srcSize, dst, maxDstSize)) {
// Initialization failed.
return 0;
}
#ifdef OUTPUT_CONFIGURATION
LDM_outputConfiguration();
#endif
/* Hash the first position and put it into the hash table. */
LDM_putHashOfCurrentPosition(&cctx);
cctx.lagIp = cctx.ip;
cctx.lagHash = cctx.lastHash;
/**
* Find a match.
* If no more matches can be found (i.e. the length of the remaining input
* is less than the minimum match length), then stop searching for matches
* and encode the final literals.
*/
while (!LDM_findBestMatch(&cctx, &match, &forwardMatchLength,
&backwardsMatchLength)) {
#ifdef COMPUTE_STATS
cctx.stats.numMatches++;
#endif
cctx.ip -= backwardsMatchLength;
match -= backwardsMatchLength;
/**
* Write current block (literals, literal length, match offset, match
* length) and update pointers and hashes.
*/
{
const U64 literalLength = cctx.ip - cctx.anchor;
const U32 offset = cctx.ip - match;
const U64 matchLength = forwardMatchLength +
backwardsMatchLength -
LDM_MIN_MATCH_LENGTH;
LDM_outputBlock(&cctx, literalLength, offset, matchLength);
#ifdef COMPUTE_STATS
cctx.stats.totalLiteralLength += literalLength;
cctx.stats.totalOffset += offset;
cctx.stats.totalMatchLength += matchLength + LDM_MIN_MATCH_LENGTH;
cctx.stats.minOffset =
offset < cctx.stats.minOffset ? offset : cctx.stats.minOffset;
cctx.stats.maxOffset =
offset > cctx.stats.maxOffset ? offset : cctx.stats.maxOffset;
cctx.stats.offsetHistogram[(U32)intLog2(offset)]++;
cctx.stats.matchLengthHistogram[
(U32)intLog2(matchLength + LDM_MIN_MATCH_LENGTH)]++;
#endif
// Move ip to end of block, inserting hashes at each position.
cctx.nextIp = cctx.ip + cctx.step;
while (cctx.ip < cctx.anchor + LDM_MIN_MATCH_LENGTH +
matchLength + literalLength) {
if (cctx.ip > cctx.lastPosHashed) {
// TODO: Simplify.
LDM_updateLastHashFromNextHash(&cctx);
setNextHash(&cctx);
}
cctx.ip++;
cctx.nextIp++;
}
}
// Set start of next block to current input pointer.
cctx.anchor = cctx.ip;
LDM_updateLastHashFromNextHash(&cctx);
}
/* Encode the last literals (no more matches). */
{
const U64 lastRun = cctx.iend - cctx.anchor;
BYTE *pToken = cctx.op++;
LDM_encodeLiteralLengthAndLiterals(&cctx, pToken, lastRun);
}
#ifdef COMPUTE_STATS
LDM_printCompressStats(&cctx.stats);
HASH_outputTableOccupancy(cctx.hashTable);
#endif
{
const size_t ret = cctx.op - cctx.obase;
LDM_destroyCCtx(&cctx);
return ret;
}
}
void LDM_outputConfiguration(void) {
printf("=====================\n");
printf("Configuration\n");
printf("LDM_WINDOW_SIZE_LOG: %d\n", LDM_WINDOW_SIZE_LOG);
printf("LDM_MIN_MATCH_LENGTH, LDM_HASH_LENGTH: %d, %d\n",
LDM_MIN_MATCH_LENGTH, LDM_HASH_LENGTH);
printf("LDM_MEMORY_USAGE: %d\n", LDM_MEMORY_USAGE);
printf("HASH_ONLY_EVERY_LOG: %d\n", HASH_ONLY_EVERY_LOG);
printf("HASH_BUCKET_SIZE_LOG: %d\n", HASH_BUCKET_SIZE_LOG);
printf("LDM_LAG: %d\n", LDM_LAG);
printf("USE_CHECKSUM: %d\n", USE_CHECKSUM);
printf("INSERT_BY_TAG: %d\n", INSERT_BY_TAG);
printf("HASH_CHAR_OFFSET: %d\n", HASH_CHAR_OFFSET);
printf("=====================\n");
}


@ -0,0 +1,197 @@
#ifndef LDM_H
#define LDM_H
#include "mem.h" // from /lib/common/mem.h
//#include "ldm_params.h"
// =============================================================================
// Modify the parameters in ldm_params.h if "ldm_params.h" is included.
// Otherwise, modify the parameters here.
// =============================================================================
#ifndef LDM_PARAMS_H
// Defines the size of the hash table.
// Note that this is not the number of buckets.
// Currently this should be less than WINDOW_SIZE_LOG + 4.
#define LDM_MEMORY_USAGE 23
// The number of entries in a hash bucket.
#define HASH_BUCKET_SIZE_LOG 3 // The maximum is 4 for now.
// Defines the lag in inserting elements into the hash table.
#define LDM_LAG 0
// The maximum window size when searching for matches.
// The maximum value is 30
#define LDM_WINDOW_SIZE_LOG 28
// The minimum match length.
// This should be a multiple of four.
#define LDM_MIN_MATCH_LENGTH 64
// If INSERT_BY_TAG, insert entries into the hash table as a function of the
// hash. Certain hashes will not be inserted.
//
// Otherwise, insert as a function of the position.
#define INSERT_BY_TAG 1
// Store a checksum with the hash table entries for faster comparison.
// This halves the number of entries the hash table can contain.
#define USE_CHECKSUM 1
#endif
// Output compression statistics.
#define COMPUTE_STATS
// Output the configuration.
#define OUTPUT_CONFIGURATION
// If defined, forces the probability of insertion to be approximately
// one per (1 << HASH_ONLY_EVERY_LOG). If not defined, the probability will be
// calculated based on the memory usage and window size for "even" insertion
// throughout the window.
// #define HASH_ONLY_EVERY_LOG 8
// =============================================================================
// The number of bytes storing the compressed and decompressed size
// in the header.
#define LDM_COMPRESSED_SIZE 8
#define LDM_DECOMPRESSED_SIZE 8
#define LDM_HEADER_SIZE ((LDM_COMPRESSED_SIZE)+(LDM_DECOMPRESSED_SIZE))
#define ML_BITS 4
#define ML_MASK ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
// The number of bytes storing the offset.
#define LDM_OFFSET_SIZE 4
#define LDM_WINDOW_SIZE (1 << (LDM_WINDOW_SIZE_LOG))
// TODO: Match lengths that are too small do not use the hash table efficiently.
// There should be a minimum hash length given the hash table size.
#define LDM_HASH_LENGTH LDM_MIN_MATCH_LENGTH
typedef struct LDM_compressStats LDM_compressStats;
typedef struct LDM_CCtx LDM_CCtx;
typedef struct LDM_DCtx LDM_DCtx;
/**
* Compresses src into dst.
* Returns the compressed size if successful, 0 otherwise.
*
* NB: This currently ignores maxDstSize and assumes enough space is available.
*
* Block format (see lz4 documentation for more information):
* github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
*
* A block is composed of sequences. Each sequence begins with a token, which
* is a one-byte value separated into two 4-bit fields.
*
* The first field uses the four high bits of the token and encodes the literal
* length. If the field value is 0, there is no literal. If it is 15,
* additional bytes are added (each ranging from 0 to 255) to the previous
* value to produce a total length.
*
* Following the token and optional length bytes are the literals.
*
* Next are the 4 bytes encoding the offset of the match (2 in lz4): the
* distance back from the current output position to copy the match from.
*
* The lower four bits of the token encode the match length, with additional
* bytes added after the offset in the same way as the literal length bytes.
*
* The last sequence is incomplete and stops right after the literals.
*/
size_t LDM_compress(const void *src, size_t srcSize,
void *dst, size_t maxDstSize);
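/*
 * Worked example of the block format (hypothetical bytes, assuming the
 * default LDM_MIN_MATCH_LENGTH of 64): a sequence with 3 literals "abc",
 * an offset of 1000, and a 70-byte match is encoded as
 *   token    = (3 << ML_BITS) | (70 - LDM_MIN_MATCH_LENGTH) = 0x36
 *   literals = 'a' 'b' 'c'
 *   offset   = 1000 written as 4 bytes via MEM_write32
 * with no extra length bytes, since both fields fit below their masks.
 */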
/**
* Initialize the compression context.
*
* Allocates memory for the hash table.
*
* Returns 0 if successful, 1 otherwise.
*/
size_t LDM_initializeCCtx(LDM_CCtx *cctx,
const void *src, size_t srcSize,
void *dst, size_t maxDstSize);
/**
* Frees up memory allocated in LDM_initializeCCtx().
*/
void LDM_destroyCCtx(LDM_CCtx *cctx);
/**
* Prints the distribution of offsets in the hash table.
*
* The offsets are defined as the distance of the hash table entry from the
* current input position of the cctx.
*/
void LDM_outputHashTableOffsetHistogram(const LDM_CCtx *cctx);
/**
* Outputs compression statistics to stdout.
*/
void LDM_printCompressStats(const LDM_compressStats *stats);
/**
* Encode the literal length followed by the literals.
*
* The literal length is written to the upper four bits of pToken, with
* additional bytes written to the output as needed (see lz4).
*
* This is followed by literalLength bytes corresponding to the literals.
*/
void LDM_encodeLiteralLengthAndLiterals(LDM_CCtx *cctx, BYTE *pToken,
const U64 literalLength);
/**
* Write current block (literals, literal length, match offset,
* match length).
*/
void LDM_outputBlock(LDM_CCtx *cctx,
const U64 literalLength,
const U32 offset,
const U64 matchLength);
/**
* Decompresses src into dst.
*
* Note: assumes src does not have a header.
*/
size_t LDM_decompress(const void *src, size_t srcSize,
void *dst, size_t maxDstSize);
/**
* Initialize the decompression context.
*/
void LDM_initializeDCtx(LDM_DCtx *dctx,
const void *src, size_t compressedSize,
void *dst, size_t maxDecompressedSize);
/**
* Reads the header from src and writes the compressed size and
* decompressed size into compressedSize and decompressedSize respectively.
*
* NB: LDM_compress and LDM_decompress currently do not add/read headers.
*/
void LDM_readHeader(const void *src, U64 *compressedSize,
U64 *decompressedSize);
/**
* Write the compressed and decompressed size.
*/
void LDM_writeHeader(void *memPtr, U64 compressedSize,
U64 decompressedSize);
/**
* Output the configuration used.
*/
void LDM_outputConfiguration(void);
#endif /* LDM_H */

@ -0,0 +1,109 @@
#include <stdio.h>
#include "ldm.h"
/**
* This function reads the header at the beginning of src and writes
* the compressed and decompressed size to compressedSize and
* decompressedSize.
*
* The header consists of 16 bytes: 8 bytes each in little-endian format
* of the compressed size and the decompressed size.
*/
void LDM_readHeader(const void *src, U64 *compressedSize,
U64 *decompressedSize) {
const BYTE *ip = (const BYTE *)src;
*compressedSize = MEM_readLE64(ip);
*decompressedSize = MEM_readLE64(ip + 8);
}
/**
* Writes the 16-byte header (8 bytes each for compressedSize and
* decompressedSize, in little-endian format) to memPtr.
*/
void LDM_writeHeader(void *memPtr, U64 compressedSize,
U64 decompressedSize) {
MEM_writeLE64(memPtr, compressedSize);
MEM_writeLE64((BYTE *)memPtr + 8, decompressedSize);
}
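A minimal round trip through this 16-byte header, using only the two functions
above (illustrative; the buffer contents and sizes are arbitrary):

/* Illustrative only : write a header, then read it back. */
BYTE header[LDM_HEADER_SIZE];
U64 cSize, dSize;
LDM_writeHeader(header, 12345, 67890);
LDM_readHeader(header, &cSize, &dSize);
/* now cSize == 12345 and dSize == 67890 */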
struct LDM_DCtx {
size_t compressedSize;
size_t maxDecompressedSize;
const BYTE *ibase; /* Base of input */
const BYTE *ip; /* Current input position */
const BYTE *iend; /* End of source */
const BYTE *obase; /* Base of output */
BYTE *op; /* Current output position */
const BYTE *oend; /* End of output */
};
void LDM_initializeDCtx(LDM_DCtx *dctx,
const void *src, size_t compressedSize,
void *dst, size_t maxDecompressedSize) {
dctx->compressedSize = compressedSize;
dctx->maxDecompressedSize = maxDecompressedSize;
dctx->ibase = src;
dctx->ip = (const BYTE *)src;
dctx->iend = dctx->ip + dctx->compressedSize;
dctx->op = dst;
dctx->oend = dctx->op + dctx->maxDecompressedSize;
}
size_t LDM_decompress(const void *src, size_t compressedSize,
void *dst, size_t maxDecompressedSize) {
LDM_DCtx dctx;
LDM_initializeDCtx(&dctx, src, compressedSize, dst, maxDecompressedSize);
while (dctx.ip < dctx.iend) {
BYTE *cpy;
const BYTE *match;
size_t length, offset;
/* Get the literal length. */
const unsigned token = *(dctx.ip)++;
if ((length = (token >> ML_BITS)) == RUN_MASK) {
unsigned s;
do {
s = *(dctx.ip)++;
length += s;
} while (s == 255);
}
/* Copy the literals. */
cpy = dctx.op + length;
memcpy(dctx.op, dctx.ip, length);
dctx.ip += length;
dctx.op = cpy;
//TODO: dynamic offset size?
/* Decode the offset. */
offset = MEM_read32(dctx.ip);
dctx.ip += LDM_OFFSET_SIZE;
match = dctx.op - offset;
/* Get the match length. */
length = token & ML_MASK;
if (length == ML_MASK) {
unsigned s;
do {
s = *(dctx.ip)++;
length += s;
} while (s == 255);
}
length += LDM_MIN_MATCH_LENGTH;
/* Copy match. */
cpy = dctx.op + length;
// TODO: this can be made more efficient.
while (match < cpy - offset && dctx.op < dctx.oend) {
*(dctx.op)++ = *match++;
}
}
return dctx.op - (BYTE *)dst;
}
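Putting the two entry points together, an in-memory round trip might look like
the sketch below, with src/srcSize standing for some caller-provided input.
Buffer sizes are assumed generous, since LDM_compress currently ignores
maxDstSize; this is illustrative, not a hardened usage pattern.

/* Illustrative round trip ; no headers are involved at this level. */
char compressed[1 << 16];
char restored[1 << 16];
size_t const cSize = LDM_compress(src, srcSize, compressed, sizeof(compressed));
size_t const dSize = LDM_decompress(compressed, cSize, restored, sizeof(restored));
/* dSize should equal srcSize, and restored should match src */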

@ -0,0 +1,12 @@
#ifndef LDM_PARAMS_H
#define LDM_PARAMS_H
#define LDM_MEMORY_USAGE 23
#define HASH_BUCKET_SIZE_LOG 3
#define LDM_LAG 0
#define LDM_WINDOW_SIZE_LOG 28
#define LDM_MIN_MATCH_LENGTH 64
#define INSERT_BY_TAG 1
#define USE_CHECKSUM 1
#endif // LDM_PARAMS_H

@ -0,0 +1,269 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <zstd.h>
#include <fcntl.h>
#include "ldm.h"
#include "zstd.h"
// #define DECOMPRESS_AND_VERIFY
/* Compress file given by fname and output to oname.
* Returns 0 if successful, error code otherwise.
*
* This adds a header from LDM_writeHeader to the beginning of the output.
*
 * This might segfault if the compressed size is greater than the decompressed
 * size, because the output file is mmapped with a size equal to the input size.
 * The compress function should check bounds before writing, or buffer its writes.
*/
static int compress(const char *fname, const char *oname) {
int fdin, fdout;
struct stat statbuf;
char *src, *dst;
size_t maxCompressedSize, compressedSize;
struct timeval tv1, tv2;
double timeTaken;
/* Open the input file. */
if ((fdin = open(fname, O_RDONLY)) < 0) {
perror("Error in file opening");
return 1;
}
/* Open the output file. */
if ((fdout = open(oname, O_RDWR | O_CREAT | O_TRUNC, (mode_t)0600)) < 0) {
perror("Can't create output file");
return 1;
}
/* Find the size of the input file. */
if (fstat (fdin, &statbuf) < 0) {
perror("Fstat error");
return 1;
}
maxCompressedSize = (statbuf.st_size + LDM_HEADER_SIZE);
// Handle case where compressed size is > decompressed size.
// TODO: The compress function should check before writing or buffer writes.
maxCompressedSize += statbuf.st_size / 255;
ftruncate(fdout, maxCompressedSize);
/* mmap the input file. */
if ((src = mmap(0, statbuf.st_size, PROT_READ, MAP_SHARED, fdin, 0))
== (caddr_t) - 1) {
perror("mmap error for input");
return 1;
}
/* mmap the output file. */
if ((dst = mmap(0, maxCompressedSize, PROT_READ | PROT_WRITE,
MAP_SHARED, fdout, 0)) == (caddr_t) - 1) {
perror("mmap error for output");
return 1;
}
gettimeofday(&tv1, NULL);
compressedSize = LDM_HEADER_SIZE +
LDM_compress(src, statbuf.st_size,
dst + LDM_HEADER_SIZE, maxCompressedSize);
gettimeofday(&tv2, NULL);
// Write the header.
LDM_writeHeader(dst, compressedSize, statbuf.st_size);
// Truncate file to compressedSize.
ftruncate(fdout, compressedSize);
printf("%25s : %10lu -> %10lu - %s \n", fname,
(size_t)statbuf.st_size, (size_t)compressedSize, oname);
printf("Compression ratio: %.2fx --- %.1f%%\n",
(double)statbuf.st_size / (double)compressedSize,
(double)compressedSize / (double)(statbuf.st_size) * 100.0);
timeTaken = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec);
printf("Total compress time = %.3f seconds, Average scanning speed: %.3f MB/s\n",
timeTaken,
((double)statbuf.st_size / (double) (1 << 20)) / timeTaken);
// Close files.
close(fdin);
close(fdout);
return 0;
}
#ifdef DECOMPRESS_AND_VERIFY
/* Decompress file compressed using LDM_compress.
* The input file should have the LDM_HEADER followed by payload.
* Returns 0 if successful, and an error code otherwise.
*/
static int decompress(const char *fname, const char *oname) {
int fdin, fdout;
struct stat statbuf;
char *src, *dst;
U64 compressedSize, decompressedSize;
size_t outSize;
/* Open the input file. */
if ((fdin = open(fname, O_RDONLY)) < 0) {
perror("Error in file opening");
return 1;
}
/* Open the output file. */
if ((fdout = open(oname, O_RDWR | O_CREAT | O_TRUNC, (mode_t)0600)) < 0) {
perror("Can't create output file");
return 1;
}
/* Find the size of the input file. */
if (fstat (fdin, &statbuf) < 0) {
perror("Fstat error");
return 1;
}
/* mmap the input file. */
if ((src = mmap(0, statbuf.st_size, PROT_READ, MAP_SHARED, fdin, 0))
== (caddr_t) - 1) {
perror("mmap error for input");
return 1;
}
/* Read the header. */
LDM_readHeader(src, &compressedSize, &decompressedSize);
ftruncate(fdout, decompressedSize);
/* mmap the output file */
if ((dst = mmap(0, decompressedSize, PROT_READ | PROT_WRITE,
MAP_SHARED, fdout, 0)) == (caddr_t) - 1) {
perror("mmap error for output");
return 1;
}
outSize = LDM_decompress(
src + LDM_HEADER_SIZE, statbuf.st_size - LDM_HEADER_SIZE,
dst, decompressedSize);
printf("Ret size out: %zu\n", outSize);
close(fdin);
close(fdout);
return 0;
}
/* Compare two files.
* Returns 0 iff they are the same.
*/
static int compare(FILE *fp0, FILE *fp1) {
int result = 0;
while (result == 0) {
char b0[1024];
char b1[1024];
const size_t r0 = fread(b0, 1, sizeof(b0), fp0);
const size_t r1 = fread(b1, 1, sizeof(b1), fp1);
result = (int)r0 - (int)r1;
if (0 == r0 || 0 == r1) break;
if (0 == result) result = memcmp(b0, b1, r0);
}
return result;
}
/* Verify the input file is the same as the decompressed file. */
static int verify(const char *inpFilename, const char *decFilename) {
FILE *inpFp, *decFp;
if ((inpFp = fopen(inpFilename, "rb")) == NULL) {
perror("Could not open input file\n");
return 1;
}
if ((decFp = fopen(decFilename, "rb")) == NULL) {
perror("Could not open decompressed file\n");
return 1;
}
printf("verify : %s <-> %s\n", inpFilename, decFilename);
{
const int cmp = compare(inpFp, decFp);
if(0 == cmp) {
printf("verify : OK\n");
} else {
printf("verify : NG\n");
return 1;
}
}
fclose(decFp);
fclose(inpFp);
return 0;
}
#endif
int main(int argc, const char *argv[]) {
const char * const exeName = argv[0];
char inpFilename[256] = { 0 };
char ldmFilename[256] = { 0 };
char decFilename[256] = { 0 };
if (argc < 2) {
printf("Wrong arguments\n");
printf("Usage:\n");
printf("%s FILE\n", exeName);
return 1;
}
snprintf(inpFilename, 256, "%s", argv[1]);
snprintf(ldmFilename, 256, "%s.ldm", argv[1]);
snprintf(decFilename, 256, "%s.ldm.dec", argv[1]);
printf("inp = [%s]\n", inpFilename);
printf("ldm = [%s]\n", ldmFilename);
printf("dec = [%s]\n", decFilename);
/* Compress */
{
if (compress(inpFilename, ldmFilename)) {
printf("Compress error\n");
return 1;
}
}
#ifdef DECOMPRESS_AND_VERIFY
/* Decompress */
{
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
if (decompress(ldmFilename, decFilename)) {
printf("Decompress error\n");
return 1;
}
gettimeofday(&tv2, NULL);
printf("Total decompress time = %f seconds\n",
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
}
/* verify */
if (verify(inpFilename, decFilename)) {
printf("Verification error\n");
return 1;
}
#endif
return 0;
}

@ -813,7 +813,9 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
* Special: value 0 means "do not change strategy". */
/* frame parameters */
ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1) */
ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1)
* note that content size must be known at the beginning,
* it is sent using ZSTD_CCtx_setPledgedSrcSize() */
ZSTD_p_checksumFlag, /* A 32-bits checksum of content is written at end of frame (default:0) */
ZSTD_p_dictIDFlag, /* When applicable, dictID of dictionary is provided in frame header (default:1) */

@ -80,9 +80,9 @@ extern "C" {
* bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
typedef struct
{
size_t bitContainer;
@ -203,7 +203,7 @@ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
/*! BIT_initCStream() :
* `dstCapacity` must be > sizeof(size_t)
* @return : 0 if success,
otherwise an error code (can be tested using ERR_isError() ) */
* otherwise an error code (can be tested using ERR_isError()) */
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
void* startPtr, size_t dstCapacity)
{
@ -217,8 +217,8 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
}
/*! BIT_addBits() :
can add up to 26 bits into `bitC`.
Does not check for register overflow ! */
* can add up to 26 bits into `bitC`.
* Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
size_t value, unsigned nbBits)
{
@ -268,7 +268,7 @@ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
/*! BIT_closeCStream() :
* @return : size of CStream, in bytes,
or 0 if it could not fit into dstBuffer */
* or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
BIT_addBitsFast(bitC, 1, 1); /* endMark */
@ -279,14 +279,14 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
/*-********************************************************
* bitStream decoding
* bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
* Initialize a BIT_DStream_t.
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
* `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize) or an errorCode if a problem is detected
*/
* Initialize a BIT_DStream_t.
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
* `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize), or an errorCode if a problem is detected
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
@ -305,29 +305,30 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
/* fall-through */
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
/* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
/* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
/* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
/* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
/* fall-through */
default: break;
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
}
@ -363,9 +364,8 @@ MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
* local register is not modified.
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted
*/
MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
* @return : value extracted */
MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
#if defined(__BMI__) && defined(__GNUC__) /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
@ -392,8 +392,7 @@ MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
/*! BIT_readBits() :
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value.
*/
* @return : extracted value. */
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
size_t const value = BIT_lookBits(bitD, nbBits);
@ -402,7 +401,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
}
/*! BIT_readBitsFast() :
* unsafe version; only works only if nbBits >= 1 */
* unsafe version; only works only if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
size_t const value = BIT_lookBitsFast(bitD, nbBits);
@ -412,10 +411,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
}
/*! BIT_reloadDStream() :
* Refill `bitD` from buffer previously set in BIT_initDStream() .
* This function is safe, it guarantees it will not read beyond src buffer.
* @return : status of `BIT_DStream_t` internal register.
if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
* Refill `bitD` from buffer previously set in BIT_initDStream() .
* This function is safe, it guarantees it will not read beyond src buffer.
* @return : status of `BIT_DStream_t` internal register.
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
@ -446,8 +445,8 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
}
/*! BIT_endOfDStream() :
* @return Tells if DStream has exactly reached its end (all bits consumed).
*/
* @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
*/
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
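To make the LIFO property concrete, here is a hedged sketch of a write-then-read
round trip using the functions above (internal API, static linking only; error
checks omitted for brevity):

/* Illustrative only : values come back in reverse order of insertion. */
BYTE buffer[16];
BIT_CStream_t bitC;
BIT_DStream_t bitD;
BIT_initCStream(&bitC, buffer, sizeof(buffer));
BIT_addBits(&bitC, 5, 4);                            /* written first */
BIT_addBits(&bitC, 9, 4);                            /* written second */
{   size_t const streamSize = BIT_closeCStream(&bitC);
    BIT_initDStream(&bitD, buffer, streamSize);
    { size_t const first  = BIT_readBits(&bitD, 4);  /* == 9 : last written */
      size_t const second = BIT_readBits(&bitD, 4);  /* == 5 : first written */
      (void)first; (void)second;
}   }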

@ -20,19 +20,17 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(GENERIC): return "Error (generic)";
case PREFIX(prefix_unknown): return "Unknown frame descriptor";
case PREFIX(version_unsupported): return "Version not supported";
case PREFIX(parameter_unknown): return "Unknown parameter type";
case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
case PREFIX(frameParameter_unsupportedBy32bits): return "Frame parameter unsupported in 32-bits mode";
case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
case PREFIX(compressionParameter_unsupported): return "Compression parameter is not supported";
case PREFIX(compressionParameter_outOfBound): return "Compression parameter is out of bound";
case PREFIX(corruption_detected): return "Corrupted block detected";
case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
case PREFIX(parameter_unsupported): return "Unsupported parameter";
case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
case PREFIX(srcSize_wrong): return "Src size is incorrect";
case PREFIX(corruption_detected): return "Corrupted block detected";
case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";

@ -31,13 +31,14 @@
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef FSE_H
#define FSE_H
#if defined (__cplusplus)
extern "C" {
#endif
#ifndef FSE_H
#define FSE_H
/*-*****************************************
* Dependencies
@ -297,8 +298,10 @@ FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
#endif /* FSE_H */
#ifdef FSE_STATIC_LINKING_ONLY
#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY
/* *** Dependency *** */
#include "bitstream.h"
@ -381,6 +384,11 @@ size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
typedef enum {
FSE_repeat_none, /**< Cannot use the previous table */
FSE_repeat_check, /**< Can use the previous table but it must be checked */
FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */
} FSE_repeat;
/* *****************************************
* FSE symbol compression API
@ -694,5 +702,3 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
#if defined (__cplusplus)
}
#endif
#endif /* FSE_H */

@ -31,13 +31,13 @@
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUF_H_298734234
#define HUF_H_298734234
#if defined (__cplusplus)
extern "C" {
#endif
#ifndef HUF_H_298734234
#define HUF_H_298734234
/* *** Dependencies *** */
#include <stddef.h> /* size_t */
@ -124,6 +124,7 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
#endif /* HUF_H_298734234 */
/* ******************************************************************
* WARNING !!
@ -132,7 +133,8 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const
* because they are not guaranteed to remain stable in the future.
* Only consider them in association with static linking.
*******************************************************************/
#ifdef HUF_STATIC_LINKING_ONLY
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY
/* *** Dependencies *** */
#include "mem.h" /* U32 */
@ -295,9 +297,6 @@ size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* c
#endif /* HUF_STATIC_LINKING_ONLY */
#if defined (__cplusplus)
}
#endif
#endif /* HUF_H_298734234 */

@ -92,15 +92,15 @@ POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
* and full queues.
*/
ctx->queueSize = queueSize + 1;
ctx->queue = (POOL_job *)malloc(ctx->queueSize * sizeof(POOL_job));
ctx->queue = (POOL_job*) malloc(ctx->queueSize * sizeof(POOL_job));
ctx->queueHead = 0;
ctx->queueTail = 0;
pthread_mutex_init(&ctx->queueMutex, NULL);
pthread_cond_init(&ctx->queuePushCond, NULL);
pthread_cond_init(&ctx->queuePopCond, NULL);
(void)pthread_mutex_init(&ctx->queueMutex, NULL);
(void)pthread_cond_init(&ctx->queuePushCond, NULL);
(void)pthread_cond_init(&ctx->queuePopCond, NULL);
ctx->shutdown = 0;
/* Allocate space for the thread handles */
ctx->threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t));
ctx->threads = (pthread_t*)malloc(numThreads * sizeof(pthread_t));
ctx->numThreads = 0;
/* Check for errors */
if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
@ -153,8 +153,8 @@ size_t POOL_sizeof(POOL_ctx *ctx) {
+ ctx->numThreads * sizeof(pthread_t);
}
void POOL_add(void *ctxVoid, POOL_function function, void *opaque) {
POOL_ctx *ctx = (POOL_ctx *)ctxVoid;
void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
POOL_ctx* const ctx = (POOL_ctx*)ctxVoid;
if (!ctx) { return; }
pthread_mutex_lock(&ctx->queueMutex);
@ -183,22 +183,22 @@ struct POOL_ctx_s {
int data;
};
POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
(void)numThreads;
(void)queueSize;
return (POOL_ctx *)malloc(sizeof(POOL_ctx));
return (POOL_ctx*)malloc(sizeof(POOL_ctx));
}
void POOL_free(POOL_ctx *ctx) {
if (ctx) free(ctx);
void POOL_free(POOL_ctx* ctx) {
free(ctx);
}
void POOL_add(void *ctx, POOL_function function, void *opaque) {
void POOL_add(void* ctx, POOL_function function, void* opaque) {
(void)ctx;
function(opaque);
}
size_t POOL_sizeof(POOL_ctx *ctx) {
size_t POOL_sizeof(POOL_ctx* ctx) {
if (ctx==NULL) return 0; /* supports sizeof NULL */
return sizeof(*ctx);
}

@ -19,11 +19,11 @@ extern "C" {
typedef struct POOL_ctx_s POOL_ctx;
/*! POOL_create() :
Create a thread pool with at most `numThreads` threads.
`numThreads` must be at least 1.
The maximum number of queued jobs before blocking is `queueSize`.
`queueSize` must be at least 1.
@return : The POOL_ctx pointer on success else NULL.
* Create a thread pool with at most `numThreads` threads.
* `numThreads` must be at least 1.
* The maximum number of queued jobs before blocking is `queueSize`.
* `queueSize` must be at least 1.
* @return : POOL_ctx pointer on success, else NULL.
*/
POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
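A minimal usage sketch of this pool, assuming POOL_function has the shape
void (*)(void*) as implied by POOL_add above (error handling elided):

/* Illustrative job and driver code. */
static void myJob(void *opaque) {
    int *const value = (int *)opaque;
    *value += 1;   /* runs on one of the pool's threads */
}

int arg = 41;
POOL_ctx *const pool = POOL_create(4 /* numThreads */, 8 /* queueSize */);
if (pool != NULL) {
    POOL_add(pool, myJob, &arg);   /* blocks only when the queue is full */
    POOL_free(pool);
}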

@ -42,14 +42,14 @@ extern "C" {
/* mutex */
#define pthread_mutex_t CRITICAL_SECTION
#define pthread_mutex_init(a,b) InitializeCriticalSection((a))
#define pthread_mutex_init(a,b) (InitializeCriticalSection((a)), 0)
#define pthread_mutex_destroy(a) DeleteCriticalSection((a))
#define pthread_mutex_lock(a) EnterCriticalSection((a))
#define pthread_mutex_unlock(a) LeaveCriticalSection((a))
/* condition variable */
#define pthread_cond_t CONDITION_VARIABLE
#define pthread_cond_init(a, b) InitializeConditionVariable((a))
#define pthread_cond_init(a, b) (InitializeConditionVariable((a)), 0)
#define pthread_cond_destroy(a) /* No delete */
#define pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
#define pthread_cond_signal(a) WakeConditionVariable((a))
@ -80,14 +80,14 @@ int _pthread_join(pthread_t* thread, void** value_ptr);
#else /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */
#define pthread_mutex_t int /* #define rather than typedef, as sometimes pthread support is implicit, resulting in duplicated symbols */
#define pthread_mutex_init(a,b)
#define pthread_mutex_t int /* #define rather than typedef, because sometimes pthread support is implicit, resulting in duplicated symbols */
#define pthread_mutex_init(a,b) ((void)a, 0)
#define pthread_mutex_destroy(a)
#define pthread_mutex_lock(a)
#define pthread_mutex_unlock(a)
#define pthread_cond_t int
#define pthread_cond_init(a,b)
#define pthread_cond_init(a,b) ((void)a, 0)
#define pthread_cond_destroy(a)
#define pthread_cond_wait(a,b)
#define pthread_cond_signal(a)
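The ((void)a, 0) form matters because call sites may test the return value of
pthread_mutex_init() / pthread_cond_init(). With the macros above, code like
the following compiles and behaves sensibly in every configuration (a hedged
illustration):

pthread_mutex_t mutex;
if (pthread_mutex_init(&mutex, NULL)) {
    /* real pthreads : initialization failed ;
     * Windows shim and no-MT build : the macro evaluates to 0,
     * so this branch is never taken */
}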

@ -37,43 +37,41 @@ extern "C" {
/*-****************************************
* error codes list
* note : this API is still considered unstable
* it should not be used with a dynamic library
* and shall not be used with a dynamic library.
* only static linking is allowed
******************************************/
typedef enum {
ZSTD_error_no_error,
ZSTD_error_GENERIC,
ZSTD_error_prefix_unknown,
ZSTD_error_version_unsupported,
ZSTD_error_parameter_unknown,
ZSTD_error_frameParameter_unsupported,
ZSTD_error_frameParameter_unsupportedBy32bits,
ZSTD_error_frameParameter_windowTooLarge,
ZSTD_error_compressionParameter_unsupported,
ZSTD_error_compressionParameter_outOfBound,
ZSTD_error_init_missing,
ZSTD_error_memory_allocation,
ZSTD_error_stage_wrong,
ZSTD_error_dstSize_tooSmall,
ZSTD_error_srcSize_wrong,
ZSTD_error_corruption_detected,
ZSTD_error_checksum_wrong,
ZSTD_error_tableLog_tooLarge,
ZSTD_error_maxSymbolValue_tooLarge,
ZSTD_error_maxSymbolValue_tooSmall,
ZSTD_error_dictionary_corrupted,
ZSTD_error_dictionary_wrong,
ZSTD_error_dictionaryCreation_failed,
ZSTD_error_frameIndex_tooLarge,
ZSTD_error_seekableIO,
ZSTD_error_maxCode
ZSTD_error_no_error = 0,
ZSTD_error_GENERIC = 1,
ZSTD_error_prefix_unknown = 10,
ZSTD_error_version_unsupported = 12,
ZSTD_error_frameParameter_unsupported = 14,
ZSTD_error_frameParameter_windowTooLarge = 16,
ZSTD_error_corruption_detected = 20,
ZSTD_error_checksum_wrong = 22,
ZSTD_error_dictionary_corrupted = 30,
ZSTD_error_dictionary_wrong = 32,
ZSTD_error_dictionaryCreation_failed = 34,
ZSTD_error_parameter_unsupported = 40,
ZSTD_error_parameter_outOfBound = 42,
ZSTD_error_tableLog_tooLarge = 44,
ZSTD_error_maxSymbolValue_tooLarge = 46,
ZSTD_error_maxSymbolValue_tooSmall = 48,
ZSTD_error_stage_wrong = 60,
ZSTD_error_init_missing = 62,
ZSTD_error_memory_allocation = 64,
ZSTD_error_dstSize_tooSmall = 70,
ZSTD_error_srcSize_wrong = 72,
ZSTD_error_frameIndex_tooLarge = 100,
ZSTD_error_seekableIO = 102,
ZSTD_error_maxCode = 120 /* never EVER use this value directly, it may change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;
/*! ZSTD_getErrorCode() :
convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
which can be used to compare with enum list published above */
ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
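With the values pinned, callers can reliably branch on a specific code across
library versions. A minimal sketch using the public API, with dst/src standing
for caller-provided buffers:

/* Illustrative : distinguish "destination too small" from other failures. */
size_t const ret = ZSTD_compress(dst, dstCapacity, src, srcSize, 1);
if (ZSTD_isError(ret)) {
    if (ZSTD_getErrorCode(ret) == ZSTD_error_dstSize_tooSmall) {
        /* grow dst and retry */
    } else {
        fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(ret));   /* needs <stdio.h> */
    }
}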
#if defined (__cplusplus)

@ -50,6 +50,10 @@
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#endif
@ -211,20 +215,6 @@ MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* s
*********************************************/
typedef struct ZSTD_stats_s ZSTD_stats_t;
typedef struct {
U32 off;
U32 len;
} ZSTD_match_t;
typedef struct {
U32 price;
U32 off;
U32 mlen;
U32 litlen;
U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;
typedef struct seqDef_s {
U32 offset;
U16 litLength;
@ -242,13 +232,31 @@ typedef struct {
BYTE* ofCode;
U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
U32 longLengthPos;
/* opt */
ZSTD_optimal_t* priceTable;
ZSTD_match_t* matchTable;
U32* matchLengthFreq;
U32* litLengthFreq;
U32 rep[ZSTD_REP_NUM];
U32 repToConfirm[ZSTD_REP_NUM];
} seqStore_t;
typedef struct {
U32 off;
U32 len;
} ZSTD_match_t;
typedef struct {
U32 price;
U32 off;
U32 mlen;
U32 litlen;
U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;
typedef struct {
U32* litFreq;
U32* litLengthFreq;
U32* matchLengthFreq;
U32* offCodeFreq;
ZSTD_match_t* matchTable;
ZSTD_optimal_t* priceTable;
U32 matchLengthSum;
U32 matchSum;
U32 litLengthSum;
@ -264,7 +272,19 @@ typedef struct {
U32 cachedPrice;
U32 cachedLitLength;
const BYTE* cachedLiterals;
} seqStore_t;
} optState_t;
typedef struct {
U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
U32 workspace[HUF_WORKSPACE_SIZE_U32];
HUF_repeat hufCTable_repeatMode;
FSE_repeat offcode_repeatMode;
FSE_repeat matchlength_repeatMode;
FSE_repeat litlength_repeatMode;
} ZSTD_entropyCTables_t;
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);

@ -781,7 +781,7 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
/* FSE_compress_wksp() :

@ -56,7 +56,7 @@
* Error Management
****************************************************************/
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
@ -436,7 +436,7 @@ static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt*
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
#define HUF_FLUSHBITS(s) BIT_flushBits(s)
#define HUF_FLUSHBITS_1(stream) \
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
@ -451,7 +451,6 @@ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, si
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
size_t n;
const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
BIT_CStream_t bitC;
/* init */

@ -36,13 +36,6 @@ static const U32 g_searchStrength = 8; /* control skip over incompressible dat
#define HASH_READ_SIZE 8
typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
/* entropy tables always have same size */
static size_t const hufCTable_size = HUF_CTABLE_SIZE(255);
static size_t const litlengthCTable_size = FSE_CTABLE_SIZE(LLFSELog, MaxLL);
static size_t const offcodeCTable_size = FSE_CTABLE_SIZE(OffFSELog, MaxOff);
static size_t const matchlengthCTable_size = FSE_CTABLE_SIZE(MLFSELog, MaxML);
static size_t const entropyScratchSpace_size = HUF_WORKSPACE_SIZE;
/*-*************************************
* Helper functions
@ -89,8 +82,6 @@ struct ZSTD_CCtx_s {
U32 loadedDictEnd; /* index of end of dictionary */
U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */
ZSTD_compressionStage_e stage;
U32 rep[ZSTD_REP_NUM];
U32 repToConfirm[ZSTD_REP_NUM];
U32 dictID;
int compressionLevel;
ZSTD_parameters requestedParams;
@ -105,16 +96,11 @@ struct ZSTD_CCtx_s {
size_t staticSize;
seqStore_t seqStore; /* sequences storage ptrs */
optState_t optState;
U32* hashTable;
U32* hashTable3;
U32* chainTable;
HUF_repeat hufCTable_repeatMode;
HUF_CElt* hufCTable;
U32 fseCTables_ready;
FSE_CTable* offcodeCTable;
FSE_CTable* matchlengthCTable;
FSE_CTable* litlengthCTable;
unsigned* entropyScratchSpace;
ZSTD_entropyCTables_t* entropy;
/* streaming */
char* inBuff;
@ -174,19 +160,9 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
/* entropy space (never moves) */
/* note : this code should be shared with resetCCtx, rather than copy/pasted */
{ void* ptr = cctx->workSpace;
cctx->hufCTable = (HUF_CElt*)ptr;
ptr = (char*)cctx->hufCTable + hufCTable_size;
cctx->offcodeCTable = (FSE_CTable*) ptr;
ptr = (char*)ptr + offcodeCTable_size;
cctx->matchlengthCTable = (FSE_CTable*) ptr;
ptr = (char*)ptr + matchlengthCTable_size;
cctx->litlengthCTable = (FSE_CTable*) ptr;
ptr = (char*)ptr + litlengthCTable_size;
assert(((size_t)ptr & 3) == 0); /* ensure correct alignment */
cctx->entropyScratchSpace = (unsigned*) ptr;
}
if (cctx->workSpaceSize < sizeof(ZSTD_entropyCTables_t)) return NULL;
assert(((size_t)cctx->workSpace & 7) == 0); /* ensure correct alignment */
cctx->entropy = (ZSTD_entropyCTables_t*)cctx->workSpace;
return cctx;
}
@ -237,7 +213,7 @@ size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned
ZSTD_STATIC_ASSERT(ZSTD_dm_auto==0);
ZSTD_STATIC_ASSERT(ZSTD_dm_rawContent==1);
case ZSTD_p_forceRawDict : cctx->dictMode = (ZSTD_dictMode_e)(value>0); return 0;
default: return ERROR(parameter_unknown);
default: return ERROR(parameter_unsupported);
}
}
@ -251,9 +227,9 @@ static void ZSTD_cLevelToCParams(ZSTD_CCtx* cctx)
cctx->compressionLevel = ZSTD_CLEVEL_CUSTOM;
}
#define CLAMPCHECK(val,min,max) { \
if (((val)<(min)) | ((val)>(max))) { \
return ERROR(compressionParameter_outOfBound); \
#define CLAMPCHECK(val,min,max) { \
if (((val)<(min)) | ((val)>(max))) { \
return ERROR(parameter_outOfBound); \
} }
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
@ -349,7 +325,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
/* restrict dictionary mode, to "rawContent" or "fullDict" only */
ZSTD_STATIC_ASSERT((U32)ZSTD_dm_fullDict > (U32)ZSTD_dm_rawContent);
if (value > (unsigned)ZSTD_dm_fullDict)
return ERROR(compressionParameter_outOfBound);
return ERROR(parameter_outOfBound);
cctx->dictMode = (ZSTD_dictMode_e)value;
return 0;
@ -370,31 +346,31 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
if (value==0) return 0;
DEBUGLOG(5, " setting nbThreads : %u", value);
#ifndef ZSTD_MULTITHREAD
if (value > 1) return ERROR(compressionParameter_unsupported);
if (value > 1) return ERROR(parameter_unsupported);
#endif
if ((value>1) && (cctx->nbThreads != value)) {
if (cctx->staticSize) /* MT not compatible with static alloc */
return ERROR(compressionParameter_unsupported);
return ERROR(parameter_unsupported);
ZSTDMT_freeCCtx(cctx->mtctx);
cctx->nbThreads = 1;
cctx->mtctx = ZSTDMT_createCCtx(value);
cctx->mtctx = ZSTDMT_createCCtx_advanced(value, cctx->customMem);
if (cctx->mtctx == NULL) return ERROR(memory_allocation);
}
cctx->nbThreads = value;
return 0;
case ZSTD_p_jobSize:
if (cctx->nbThreads <= 1) return ERROR(compressionParameter_unsupported);
if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported);
assert(cctx->mtctx != NULL);
return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_sectionSize, value);
case ZSTD_p_overlapSizeLog:
DEBUGLOG(5, " setting overlap with nbThreads == %u", cctx->nbThreads);
if (cctx->nbThreads <= 1) return ERROR(compressionParameter_unsupported);
if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported);
assert(cctx->mtctx != NULL);
return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_overlapSectionLog, value);
default: return ERROR(parameter_unknown);
default: return ERROR(parameter_unsupported);
}
}
@ -474,7 +450,8 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) return ERROR(compressionParameter_unsupported);
if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
return ERROR(parameter_unsupported);
return 0;
}
@ -551,9 +528,7 @@ size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams)
size_t const hSize = ((size_t)1) << cParams.hashLog;
U32 const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
size_t const h3Size = ((size_t)1) << hashLog3;
size_t const entropySpace = hufCTable_size + litlengthCTable_size
+ offcodeCTable_size + matchlengthCTable_size
+ entropyScratchSpace_size;
size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
size_t const optBudget = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
@ -620,8 +595,8 @@ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 ple
cctx->stage = ZSTDcs_init;
cctx->dictID = 0;
cctx->loadedDictEnd = 0;
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = repStartValue[i]; }
cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = repStartValue[i]; }
cctx->optState.litLengthSum = 0; /* force reset of btopt stats */
XXH64_reset(&cctx->xxhState, 0);
return 0;
}
@ -641,8 +616,10 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
if (crp == ZSTDcrp_continue) {
if (ZSTD_equivalentParams(params.cParams, zc->appliedParams.cParams)) {
DEBUGLOG(5, "ZSTD_equivalentParams()==1");
zc->fseCTables_ready = 0;
zc->hufCTable_repeatMode = HUF_repeat_none;
zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
zc->entropy->offcode_repeatMode = FSE_repeat_none;
zc->entropy->matchlength_repeatMode = FSE_repeat_none;
zc->entropy->litlength_repeatMode = FSE_repeat_none;
return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
} }
@ -662,9 +639,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
void* ptr;
/* Check if workSpace is large enough, alloc a new one if needed */
{ size_t const entropySpace = hufCTable_size + litlengthCTable_size
+ offcodeCTable_size + matchlengthCTable_size
+ entropyScratchSpace_size;
{ size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
+ (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
size_t const optSpace = ( (params.cParams.strategy == ZSTD_btopt)
@ -689,16 +664,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ptr = zc->workSpace;
/* entropy space */
zc->hufCTable = (HUF_CElt*)ptr;
ptr = (char*)zc->hufCTable + hufCTable_size; /* note : HUF_CElt* is incomplete type, size is estimated via macro */
zc->offcodeCTable = (FSE_CTable*) ptr;
ptr = (char*)ptr + offcodeCTable_size;
zc->matchlengthCTable = (FSE_CTable*) ptr;
ptr = (char*)ptr + matchlengthCTable_size;
zc->litlengthCTable = (FSE_CTable*) ptr;
ptr = (char*)ptr + litlengthCTable_size;
assert(((size_t)ptr & 3) == 0); /* ensure correct alignment */
zc->entropyScratchSpace = (unsigned*) ptr;
assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */
assert(zc->workSpaceSize >= sizeof(ZSTD_entropyCTables_t));
zc->entropy = (ZSTD_entropyCTables_t*)zc->workSpace;
} }
/* init params */
@ -715,39 +683,35 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->stage = ZSTDcs_init;
zc->dictID = 0;
zc->loadedDictEnd = 0;
zc->fseCTables_ready = 0;
zc->hufCTable_repeatMode = HUF_repeat_none;
zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
zc->entropy->offcode_repeatMode = FSE_repeat_none;
zc->entropy->matchlength_repeatMode = FSE_repeat_none;
zc->entropy->litlength_repeatMode = FSE_repeat_none;
zc->nextToUpdate = 1;
zc->nextSrc = NULL;
zc->base = NULL;
zc->dictBase = NULL;
zc->dictLimit = 0;
zc->lowLimit = 0;
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = repStartValue[i]; }
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->seqStore.rep[i] = repStartValue[i]; }
zc->hashLog3 = hashLog3;
zc->seqStore.litLengthSum = 0;
zc->optState.litLengthSum = 0;
/* ensure entropy tables are close together at the beginning */
assert((void*)zc->hufCTable == zc->workSpace);
assert((char*)zc->offcodeCTable == (char*)zc->hufCTable + hufCTable_size);
assert((char*)zc->matchlengthCTable == (char*)zc->offcodeCTable + offcodeCTable_size);
assert((char*)zc->litlengthCTable == (char*)zc->matchlengthCTable + matchlengthCTable_size);
assert((char*)zc->entropyScratchSpace == (char*)zc->litlengthCTable + litlengthCTable_size);
ptr = (char*)zc->entropyScratchSpace + entropyScratchSpace_size;
ptr = zc->entropy + 1;
/* opt parser space */
if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btultra)) {
DEBUGLOG(5, "reserving optimal parser space");
assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
zc->seqStore.litFreq = (U32*)ptr;
zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits);
zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1);
zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1);
ptr = zc->seqStore.offCodeFreq + (MaxOff+1);
zc->seqStore.matchTable = (ZSTD_match_t*)ptr;
ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1;
zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr;
ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1;
zc->optState.litFreq = (U32*)ptr;
zc->optState.litLengthFreq = zc->optState.litFreq + (1<<Litbits);
zc->optState.matchLengthFreq = zc->optState.litLengthFreq + (MaxLL+1);
zc->optState.offCodeFreq = zc->optState.matchLengthFreq + (MaxML+1);
ptr = zc->optState.offCodeFreq + (MaxOff+1);
zc->optState.matchTable = (ZSTD_match_t*)ptr;
ptr = zc->optState.matchTable + ZSTD_OPT_NUM+1;
zc->optState.priceTable = (ZSTD_optimal_t*)ptr;
ptr = zc->optState.priceTable + ZSTD_OPT_NUM+1;
}
/* table Space */
@ -783,7 +747,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
* do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
int i;
for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = 0;
for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = 0;
}
@ -830,16 +794,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
dstCCtx->dictID = srcCCtx->dictID;
/* copy entropy tables */
dstCCtx->fseCTables_ready = srcCCtx->fseCTables_ready;
if (srcCCtx->fseCTables_ready) {
memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, litlengthCTable_size);
memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, matchlengthCTable_size);
memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, offcodeCTable_size);
}
dstCCtx->hufCTable_repeatMode = srcCCtx->hufCTable_repeatMode;
if (srcCCtx->hufCTable_repeatMode) {
memcpy(dstCCtx->hufCTable, srcCCtx->hufCTable, hufCTable_size);
}
memcpy(dstCCtx->entropy, srcCCtx->entropy, sizeof(ZSTD_entropyCTables_t));
return 0;
}
@ -956,7 +911,8 @@ static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, cons
static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t * entropy,
ZSTD_strategy strategy,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
@ -970,28 +926,28 @@ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
/* small ? don't even attempt compression (speed opt) */
# define LITERAL_NOENTROPY 63
{ size_t const minLitSize = zc->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
{ size_t const minLitSize = entropy->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
{ HUF_repeat repeat = zc->hufCTable_repeatMode;
int const preferRepeat = zc->appliedParams.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
{ HUF_repeat repeat = entropy->hufCTable_repeatMode;
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat)
entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat)
: HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat);
entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat);
if (repeat != HUF_repeat_none) { hType = set_repeat; } /* reused the existing table */
else { zc->hufCTable_repeatMode = HUF_repeat_check; } /* now have a table to reuse */
else { entropy->hufCTable_repeatMode = HUF_repeat_check; } /* now have a table to reuse */
}
if ((cLitSize==0) | (cLitSize >= srcSize - minGain)) {
zc->hufCTable_repeatMode = HUF_repeat_none;
entropy->hufCTable_repeatMode = HUF_repeat_none;
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (cLitSize==1) {
zc->hufCTable_repeatMode = HUF_repeat_none;
entropy->hufCTable_repeatMode = HUF_repeat_none;
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
}
@ -1062,17 +1018,155 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMode,
size_t const mostFrequent, size_t nbSeq, U32 defaultNormLog)
{
#define MIN_SEQ_FOR_DYNAMIC_FSE 64
#define MAX_SEQ_FOR_STATIC_FSE 1000
if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
*repeatMode = FSE_repeat_check;
return set_rle;
}
if ((*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
return set_repeat;
}
if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) {
*repeatMode = FSE_repeat_valid;
return set_basic;
}
*repeatMode = FSE_repeat_check;
return set_compressed;
}
MEM_STATIC size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
FSE_CTable* CTable, U32 FSELog, symbolEncodingType_e type,
U32* count, U32 max,
BYTE const* codeTable, size_t nbSeq,
S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
void* workspace, size_t workspaceSize)
{
BYTE* op = (BYTE*)dst;
BYTE const* const oend = op + dstCapacity;
switch (type) {
case set_rle:
*op = codeTable[0];
CHECK_F(FSE_buildCTable_rle(CTable, (BYTE)max));
return 1;
case set_repeat:
return 0;
case set_basic:
CHECK_F(FSE_buildCTable_wksp(CTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));
return 0;
case set_compressed: {
S16 norm[MaxSeq + 1];
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
if (count[codeTable[nbSeq-1]] > 1) {
count[codeTable[nbSeq-1]]--;
nbSeq_1--;
}
CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return NCountSize;
CHECK_F(FSE_buildCTable_wksp(CTable, norm, max, tableLog, workspace, workspaceSize));
return NCountSize;
}
}
default: return assert(0), ERROR(GENERIC);
}
}
MEM_STATIC size_t ZSTD_encodeSequences(void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets)
{
BIT_CStream_t blockStream;
FSE_CState_t stateMatchLength;
FSE_CState_t stateOffsetBits;
FSE_CState_t stateLitLength;
CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
/* first symbols */
FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
if (longOffsets) {
U32 const ofBits = ofCodeTable[nbSeq-1];
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
BIT_flushBits(&blockStream);
}
BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
ofBits - extraBits);
} else {
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
}
BIT_flushBits(&blockStream);
{ size_t n;
for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
BYTE const llCode = llCodeTable[n];
BYTE const ofCode = ofCodeTable[n];
BYTE const mlCode = mlCodeTable[n];
U32 const llBits = LL_bits[llCode];
U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
U32 const mlBits = ML_bits[mlCode];
/* (7)*/ /* (7)*/
FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
BIT_flushBits(&blockStream); /* (7)*/
BIT_addBits(&blockStream, sequences[n].litLength, llBits);
if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
if (longOffsets) {
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[n].offset, extraBits);
BIT_flushBits(&blockStream); /* (7)*/
}
BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
ofBits - extraBits); /* 31 */
} else {
BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
}
BIT_flushBits(&blockStream); /* (7)*/
} }
FSE_flushCState(&blockStream, &stateMatchLength);
FSE_flushCState(&blockStream, &stateOffsetBits);
FSE_flushCState(&blockStream, &stateLitLength);
{ size_t const streamSize = BIT_closeCStream(&blockStream);
if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
return streamSize;
}
}
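Note the loop above : with an unsigned counter, decrementing past zero wraps
around to a huge value, so the n < nbSeq comparison doubles as the termination
test. In isolation, the idiom looks like this:

/* Illustrative : visits nbSeq-2 down to 0, then stops on wrap-around. */
size_t n;
for (n = nbSeq - 2; n < nbSeq; n--) {
    /* after the n == 0 iteration, n-- wraps to SIZE_MAX and the loop exits */
}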
MEM_STATIC size_t ZSTD_compressSequences (seqStore_t* seqStorePtr,
ZSTD_entropyCTables_t* entropy,
ZSTD_compressionParameters const* cParams,
void* dst, size_t dstCapacity,
size_t srcSize)
{
const int longOffsets = zc->appliedParams.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
const seqStore_t* seqStorePtr = &(zc->seqStore);
const int longOffsets = cParams->windowLog > STREAM_ACCUMULATOR_MIN;
U32 count[MaxSeq+1];
S16 norm[MaxSeq+1];
FSE_CTable* CTable_LitLength = zc->litlengthCTable;
FSE_CTable* CTable_OffsetBits = zc->offcodeCTable;
FSE_CTable* CTable_MatchLength = zc->matchlengthCTable;
FSE_CTable* CTable_LitLength = entropy->litlengthCTable;
FSE_CTable* CTable_OffsetBits = entropy->offcodeCTable;
FSE_CTable* CTable_MatchLength = entropy->matchlengthCTable;
U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
const seqDef* const sequences = seqStorePtr->sequencesStart;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
@ -1083,13 +1177,16 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
BYTE* op = ostart;
size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
BYTE* seqHead;
BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
ZSTD_STATIC_ASSERT(sizeof(entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));
/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
size_t const litSize = seqStorePtr->lit - literals;
size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
if (ZSTD_isError(cSize)) return cSize;
size_t const cSize = ZSTD_compressLiterals(
entropy, cParams->strategy, op, dstCapacity, literals, litSize);
if (ZSTD_isError(cSize))
return cSize;
op += cSize;
}
@ -1103,170 +1200,65 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
/* seqHead : flags for FSE encoding type */
seqHead = op++;
#define MIN_SEQ_FOR_DYNAMIC_FSE 64
#define MAX_SEQ_FOR_STATIC_FSE 1000
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
/* CTable for Literal Lengths */
{ U32 max = MaxLL;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->entropyScratchSpace);
if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
*op++ = llCodeTable[0];
FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
LLtype = set_rle;
} else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
LLtype = set_repeat;
} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) {
FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
LLtype = set_basic;
} else {
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; }
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return NCountSize;
op += NCountSize; }
FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
LLtype = set_compressed;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, entropy->workspace);
LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog);
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
entropy->workspace, sizeof(entropy->workspace));
if (ZSTD_isError(countSize)) return countSize;
op += countSize;
} }
/* CTable for Offsets */
{ U32 max = MaxOff;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->entropyScratchSpace);
if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
*op++ = ofCodeTable[0];
FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
Offtype = set_rle;
} else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
Offtype = set_repeat;
} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) {
FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
Offtype = set_basic;
} else {
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; }
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return NCountSize;
op += NCountSize; }
FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
Offtype = set_compressed;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, entropy->workspace);
Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog);
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, MaxOff,
entropy->workspace, sizeof(entropy->workspace));
if (ZSTD_isError(countSize)) return countSize;
op += countSize;
} }
/* CTable for MatchLengths */
{ U32 max = MaxML;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->entropyScratchSpace);
if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
*op++ = *mlCodeTable;
FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
MLtype = set_rle;
} else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
MLtype = set_repeat;
} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) {
FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
MLtype = set_basic;
} else {
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; }
FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
{ size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
if (FSE_isError(NCountSize)) return NCountSize;
op += NCountSize; }
FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
MLtype = set_compressed;
size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, entropy->workspace);
MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog);
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
entropy->workspace, sizeof(entropy->workspace));
if (ZSTD_isError(countSize)) return countSize;
op += countSize;
} }
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
zc->fseCTables_ready = 0;
/* Encoding Sequences */
{ BIT_CStream_t blockStream;
FSE_CState_t stateMatchLength;
FSE_CState_t stateOffsetBits;
FSE_CState_t stateLitLength;
{ size_t const streamSize = ZSTD_encodeSequences(op, oend - op,
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq, longOffsets);
if (ZSTD_isError(streamSize)) return streamSize;
op += streamSize;
}
CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */
/* first symbols */
FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
if (longOffsets) {
U32 const ofBits = ofCodeTable[nbSeq-1];
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
BIT_flushBits(&blockStream);
}
BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
ofBits - extraBits);
} else {
BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
}
BIT_flushBits(&blockStream);
{ size_t n;
for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
BYTE const llCode = llCodeTable[n];
BYTE const ofCode = ofCodeTable[n];
BYTE const mlCode = mlCodeTable[n];
U32 const llBits = LL_bits[llCode];
U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
U32 const mlBits = ML_bits[mlCode];
/* (7)*/ /* (7)*/
FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
BIT_flushBits(&blockStream); /* (7)*/
BIT_addBits(&blockStream, sequences[n].litLength, llBits);
if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
if (longOffsets) {
int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[n].offset, extraBits);
BIT_flushBits(&blockStream); /* (7)*/
}
BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
ofBits - extraBits); /* 31 */
} else {
BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
}
BIT_flushBits(&blockStream); /* (7)*/
} }
FSE_flushCState(&blockStream, &stateMatchLength);
FSE_flushCState(&blockStream, &stateOffsetBits);
FSE_flushCState(&blockStream, &stateLitLength);
{ size_t const streamSize = BIT_closeCStream(&blockStream);
if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
op += streamSize;
} }
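The loop deleted above is the inline form of what this series factors into ZSTD_encodeSequences(); its flush guard deserves a worked check. The three FSE state updates cost at most LLFSELog+MLFSELog+OffFSELog = 9+9+8 = 26 bits, and BIT_flushBits() can leave up to 7 bits in the accumulator, so on 64-bit targets the extra-bit fields fit without an intermediate flush only while ofBits+mlBits+llBits stays below 64-7-26 = 31 — the `>= 64-7-(LLFSELog+MLFSELog+OffFSELog)` condition annotated above. A standalone sketch of the arithmetic (constant values are zstd's at this point in history):

    #include <stdio.h>

    int main(void)
    {
        int const LLFSELog = 9, MLFSELog = 9, OffFSELog = 8;  /* zstd_internal.h */
        int const accBits  = 64;   /* bit accumulator width on 64-bit targets */
        int const dirty    = 7;    /* BIT_flushBits may keep up to 7 bits */
        printf("extra-bit budget per sequence: %d\n",
               accBits - dirty - (LLFSELog + MLFSELog + OffFSELog));   /* 31 */
        return 0;
    }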
/* check compressibility */
_check_compressibility:
{ size_t const minGain = ZSTD_minGain(srcSize);
size_t const maxCSize = srcSize - minGain;
if ((size_t)(op-ostart) >= maxCSize) {
zc->hufCTable_repeatMode = HUF_repeat_none;
entropy->hufCTable_repeatMode = HUF_repeat_none;
entropy->offcode_repeatMode = FSE_repeat_none;
entropy->matchlength_repeatMode = FSE_repeat_none;
entropy->litlength_repeatMode = FSE_repeat_none;
return 0;
} }
/* confirm repcodes */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->repToConfirm[i]; }
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->rep[i] = seqStorePtr->repToConfirm[i]; }
return op - ostart;
}
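With ZSTD_compressSequences() now ending here, note what the three near-identical deleted branches per table became: ZSTD_selectEncodingType() picks rle/repeat/basic/compressed, and ZSTD_buildCTable() does the shared normalize/NCount/build work. A minimal sketch of the selection rule, reconstructed from the removed inline code — the repeat-mode bookkeeping shown is an assumption about the helper's internals, not a quote of it:

    typedef enum { set_basic, set_rle, set_compressed, set_repeat } encType_sketch_e;
    typedef enum { REP_none, REP_check, REP_valid } repMode_sketch_e;

    static encType_sketch_e selectEncodingType_sketch(repMode_sketch_e* repeatMode,
            size_t mostFrequent, size_t nbSeq, unsigned defaultNormLog)
    {
        if ((mostFrequent == nbSeq) && (nbSeq > 2))
            return set_rle;                       /* one dominant code : 1-byte RLE */
        if ((*repeatMode == REP_valid) && (nbSeq < 1000))  /* MAX_SEQ_FOR_STATIC_FSE */
            return set_repeat;                    /* reuse previous block's table */
        if ((nbSeq < 64)                          /* MIN_SEQ_FOR_DYNAMIC_FSE */
            || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) {
            *repeatMode = REP_none;               /* assumed : defaults invalidate reuse */
            return set_basic;                     /* predefined distribution */
        }
        *repeatMode = REP_check;                  /* assumed : fresh table, revalidate */
        return set_compressed;                    /* normalize, write NCount, build */
    }

The unchanged tail still applies the minGain test; with ZSTD_minGain(srcSize) = (srcSize>>6)+2, a 128 KB block must compress below 131072-2050 = 129022 bytes or it is emitted raw, and the new code then resets all four repeat modes so the next block cannot claim set_repeat against tables that were never transmitted.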
@ -1491,7 +1483,7 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
const BYTE* const lowest = base + lowestIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
U32 offsetSaved = 0;
/* init */
@ -1552,8 +1544,8 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
} } }
/* save reps for next block */
cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
@ -1601,7 +1593,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
@ -1667,7 +1659,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
} } }
/* save reps for next block */
ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
@ -1736,7 +1728,7 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
const BYTE* const lowest = base + lowestIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
U32 offsetSaved = 0;
/* init */
@ -1823,8 +1815,8 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
} } }
/* save reps for next block */
cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
@ -1873,7 +1865,7 @@ static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
@ -1973,7 +1965,7 @@ static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
} } }
/* save reps for next block */
ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
@ -2409,7 +2401,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
size_t* offsetPtr,
U32 maxNbAttempts, U32 matchLengthSearch);
searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0;
U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;
/* init */
ip += (ip==base);
@ -2519,8 +2511,8 @@ _storeSequence:
} }
/* Save reps for next block */
ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
@ -2578,7 +2570,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
U32 maxNbAttempts, U32 matchLengthSearch);
searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
/* init */
ctx->nextToUpdate3 = ctx->nextToUpdate;
@ -2714,7 +2706,7 @@ _storeSequence:
} }
/* Save reps for next block */
ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
@ -2823,7 +2815,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCa
if (current > zc->nextToUpdate + 384)
zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* limited update after finding a very long match */
blockCompressor(zc, src, srcSize);
return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
return ZSTD_compressSequences(&zc->seqStore, zc->entropy, &zc->appliedParams.cParams, dst, dstCapacity, srcSize);
}
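These mechanical zc→seqStorePtr renames preserve a two-phase repcode update: block compressors write candidate offsets into repToConfirm, and they are promoted into rep[] only when ZSTD_compressSequences() actually emits a compressed block, so a raw fallback never advances repcode history. A self-contained sketch of the pattern (ZSTD_REP_NUM is 3 in zstd; names here are illustrative):

    #define REP_NUM 3
    typedef struct { unsigned rep[REP_NUM]; unsigned repToConfirm[REP_NUM]; } repState_t;

    static void block_done(repState_t* s, int emittedCompressed)
    {
        int i;
        if (!emittedCompressed) return;          /* raw block : keep old history */
        for (i = 0; i < REP_NUM; i++)
            s->rep[i] = s->repToConfirm[i];      /* commit speculative repcodes */
    }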
@ -3105,13 +3097,14 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
const BYTE* const dictEnd = dictPtr + dictSize;
short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff;
BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
ZSTD_STATIC_ASSERT(sizeof(cctx->entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));
dictPtr += 4; /* skip magic number */
cctx->dictID = cctx->appliedParams.fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr);
dictPtr += 4;
{ size_t const hufHeaderSize = HUF_readCTable(cctx->hufCTable, 255, dictPtr, dictEnd-dictPtr);
{ size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, 255, dictPtr, dictEnd-dictPtr);
if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
dictPtr += hufHeaderSize;
}
@ -3121,7 +3114,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
CHECK_E( FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)),
CHECK_E( FSE_buildCTable_wksp(cctx->entropy->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
dictionary_corrupted);
dictPtr += offcodeHeaderSize;
}
@ -3133,7 +3126,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
/* Every match length code must have non-zero probability */
CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
CHECK_E( FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)),
CHECK_E( FSE_buildCTable_wksp(cctx->entropy->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
dictionary_corrupted);
dictPtr += matchlengthHeaderSize;
}
@ -3145,15 +3138,15 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
/* Every literal length code must have non-zero probability */
CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
CHECK_E( FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)),
CHECK_E( FSE_buildCTable_wksp(cctx->entropy->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
dictionary_corrupted);
dictPtr += litlengthHeaderSize;
}
if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
cctx->rep[0] = MEM_readLE32(dictPtr+0);
cctx->rep[1] = MEM_readLE32(dictPtr+4);
cctx->rep[2] = MEM_readLE32(dictPtr+8);
cctx->seqStore.rep[0] = MEM_readLE32(dictPtr+0);
cctx->seqStore.rep[1] = MEM_readLE32(dictPtr+4);
cctx->seqStore.rep[2] = MEM_readLE32(dictPtr+8);
dictPtr += 12;
{ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
@ -3167,12 +3160,14 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t
/* All repCodes must be <= dictContentSize and != 0*/
{ U32 u;
for (u=0; u<3; u++) {
if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted);
if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
if (cctx->seqStore.rep[u] == 0) return ERROR(dictionary_corrupted);
if (cctx->seqStore.rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
} }
cctx->fseCTables_ready = 1;
cctx->hufCTable_repeatMode = HUF_repeat_valid;
cctx->entropy->hufCTable_repeatMode = HUF_repeat_valid;
cctx->entropy->offcode_repeatMode = FSE_repeat_valid;
cctx->entropy->matchlength_repeatMode = FSE_repeat_valid;
cctx->entropy->litlength_repeatMode = FSE_repeat_valid;
return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
}
}
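Dictionary loading now ends by priming all four entropy tables for reuse. A hedged summary of the repeat-mode semantics these assignments rely on (my reading, not spelled out in the diff):

    /* FSE_repeat_none  : no usable table; rebuild or fall back to defaults
     * FSE_repeat_check : a previous table exists but must be validated first
     * FSE_repeat_valid : the previous table may be reused as-is (set_repeat)
     * Ending dictionary load in _valid lets the first compressed block skip
     * retransmitting its FSE tables whenever ZSTD_selectEncodingType agrees. */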


@ -22,173 +22,173 @@
/*-*************************************
* Price functions for optimal parser
***************************************/
FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr)
FORCE_INLINE void ZSTD_setLog2Prices(optState_t* optPtr)
{
ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum+1);
ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum+1);
ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum+1);
ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum+1);
ssPtr->factor = 1 + ((ssPtr->litSum>>5) / ssPtr->litLengthSum) + ((ssPtr->litSum<<1) / (ssPtr->litSum + ssPtr->matchSum));
optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
optPtr->factor = 1 + ((optPtr->litSum>>5) / optPtr->litLengthSum) + ((optPtr->litSum<<1) / (optPtr->litSum + optPtr->matchSum));
}
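The renames above reach the parser's pricing core, which estimates a symbol's cost in bits as log2(total) - log2(freq), both approximated by ZSTD_highbit32. A standalone toy run of the approximation (all numbers illustrative):

    #include <stdio.h>

    /* floor(log2(v)) for v > 0, same idea as ZSTD_highbit32 */
    static unsigned highbit32(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

    int main(void)
    {
        unsigned const litSum  = 4096;   /* total literals counted so far */
        unsigned const litFreq = 256;    /* occurrences of one literal    */
        /* price = log2(4096+1) - log2(256+1) = 12 - 8 = 4 bits, matching the
         * exact cost of a symbol with probability 256/4096 = 1/16 */
        printf("~%u bits\n", highbit32(litSum + 1) - highbit32(litFreq + 1));
        return 0;
    }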
MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize)
MEM_STATIC void ZSTD_rescaleFreqs(optState_t* optPtr, const BYTE* src, size_t srcSize)
{
unsigned u;
ssPtr->cachedLiterals = NULL;
ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
ssPtr->staticPrices = 0;
optPtr->cachedLiterals = NULL;
optPtr->cachedPrice = optPtr->cachedLitLength = 0;
optPtr->staticPrices = 0;
if (ssPtr->litLengthSum == 0) {
if (srcSize <= 1024) ssPtr->staticPrices = 1;
if (optPtr->litLengthSum == 0) {
if (srcSize <= 1024) optPtr->staticPrices = 1;
assert(ssPtr->litFreq!=NULL);
assert(optPtr->litFreq!=NULL);
for (u=0; u<=MaxLit; u++)
ssPtr->litFreq[u] = 0;
optPtr->litFreq[u] = 0;
for (u=0; u<srcSize; u++)
ssPtr->litFreq[src[u]]++;
optPtr->litFreq[src[u]]++;
ssPtr->litSum = 0;
ssPtr->litLengthSum = MaxLL+1;
ssPtr->matchLengthSum = MaxML+1;
ssPtr->offCodeSum = (MaxOff+1);
ssPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
optPtr->litSum = 0;
optPtr->litLengthSum = MaxLL+1;
optPtr->matchLengthSum = MaxML+1;
optPtr->offCodeSum = (MaxOff+1);
optPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
for (u=0; u<=MaxLit; u++) {
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->litSum += ssPtr->litFreq[u];
optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>ZSTD_FREQ_DIV);
optPtr->litSum += optPtr->litFreq[u];
}
for (u=0; u<=MaxLL; u++)
ssPtr->litLengthFreq[u] = 1;
optPtr->litLengthFreq[u] = 1;
for (u=0; u<=MaxML; u++)
ssPtr->matchLengthFreq[u] = 1;
optPtr->matchLengthFreq[u] = 1;
for (u=0; u<=MaxOff; u++)
ssPtr->offCodeFreq[u] = 1;
optPtr->offCodeFreq[u] = 1;
} else {
ssPtr->matchLengthSum = 0;
ssPtr->litLengthSum = 0;
ssPtr->offCodeSum = 0;
ssPtr->matchSum = 0;
ssPtr->litSum = 0;
optPtr->matchLengthSum = 0;
optPtr->litLengthSum = 0;
optPtr->offCodeSum = 0;
optPtr->matchSum = 0;
optPtr->litSum = 0;
for (u=0; u<=MaxLit; u++) {
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
ssPtr->litSum += ssPtr->litFreq[u];
optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
optPtr->litSum += optPtr->litFreq[u];
}
for (u=0; u<=MaxLL; u++) {
ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
optPtr->litLengthSum += optPtr->litLengthFreq[u];
}
for (u=0; u<=MaxML; u++) {
ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
optPtr->matchSum += optPtr->matchLengthFreq[u] * (u + 3);
}
ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
optPtr->matchSum *= ZSTD_LITFREQ_ADD;
for (u=0; u<=MaxOff; u++) {
ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
optPtr->offCodeSum += optPtr->offCodeFreq[u];
}
}
ZSTD_setLog2Prices(ssPtr);
ZSTD_setLog2Prices(optPtr);
}
FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BYTE* literals)
FORCE_INLINE U32 ZSTD_getLiteralPrice(optState_t* optPtr, U32 litLength, const BYTE* literals)
{
U32 price, u;
if (ssPtr->staticPrices)
if (optPtr->staticPrices)
return ZSTD_highbit32((U32)litLength+1) + (litLength*6);
if (litLength == 0)
return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1);
return optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[0]+1);
/* literals */
if (ssPtr->cachedLiterals == literals) {
U32 const additional = litLength - ssPtr->cachedLitLength;
const BYTE* literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
if (optPtr->cachedLiterals == literals) {
U32 const additional = litLength - optPtr->cachedLitLength;
const BYTE* literals2 = optPtr->cachedLiterals + optPtr->cachedLitLength;
price = optPtr->cachedPrice + additional * optPtr->log2litSum;
for (u=0; u < additional; u++)
price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]]+1);
ssPtr->cachedPrice = price;
ssPtr->cachedLitLength = litLength;
price -= ZSTD_highbit32(optPtr->litFreq[literals2[u]]+1);
optPtr->cachedPrice = price;
optPtr->cachedLitLength = litLength;
} else {
price = litLength * ssPtr->log2litSum;
price = litLength * optPtr->log2litSum;
for (u=0; u < litLength; u++)
price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]]+1);
price -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
if (litLength >= 12) {
ssPtr->cachedLiterals = literals;
ssPtr->cachedPrice = price;
ssPtr->cachedLitLength = litLength;
optPtr->cachedLiterals = literals;
optPtr->cachedPrice = price;
optPtr->cachedLitLength = litLength;
}
}
/* literal Length */
{ const BYTE LL_deltaCode = 19;
const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1);
price += LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
}
return price;
}
FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
FORCE_INLINE U32 ZSTD_getPrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
{
/* offset */
U32 price;
BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
if (seqStorePtr->staticPrices)
return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
if (optPtr->staticPrices)
return ZSTD_getLiteralPrice(optPtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1);
price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
if (!ultra && offCode >= 20) price += (offCode-19)*2;
/* match Length */
{ const BYTE ML_deltaCode = 36;
const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1);
price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
}
return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
return price + ZSTD_getLiteralPrice(optPtr, litLength, literals) + optPtr->factor;
}
MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
MEM_STATIC void ZSTD_updatePrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
{
U32 u;
/* literals */
seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD;
optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
for (u=0; u < litLength; u++)
seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
/* literal Length */
{ const BYTE LL_deltaCode = 19;
const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
seqStorePtr->litLengthFreq[llCode]++;
seqStorePtr->litLengthSum++;
optPtr->litLengthFreq[llCode]++;
optPtr->litLengthSum++;
}
/* match offset */
{ BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
seqStorePtr->offCodeSum++;
seqStorePtr->offCodeFreq[offCode]++;
optPtr->offCodeSum++;
optPtr->offCodeFreq[offCode]++;
}
/* match Length */
{ const BYTE ML_deltaCode = 36;
const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
seqStorePtr->matchLengthFreq[mlCode]++;
seqStorePtr->matchLengthSum++;
optPtr->matchLengthFreq[mlCode]++;
optPtr->matchLengthSum++;
}
ZSTD_setLog2Prices(seqStorePtr);
ZSTD_setLog2Prices(optPtr);
}
@ -417,6 +417,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
const void* src, size_t srcSize, const int ultra)
{
seqStore_t* seqStorePtr = &(ctx->seqStore);
optState_t* optStatePtr = &(ctx->optState);
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
@ -430,16 +431,16 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
const U32 mls = ctx->appliedParams.cParams.searchLength;
const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
ZSTD_match_t* matches = seqStorePtr->matchTable;
ZSTD_optimal_t* opt = optStatePtr->priceTable;
ZSTD_match_t* matches = optStatePtr->matchTable;
const BYTE* inr;
U32 offset, rep[ZSTD_REP_NUM];
/* init */
ctx->nextToUpdate3 = ctx->nextToUpdate;
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
ip += (ip==prefixStart);
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
/* Match Loop */
while (ip < ilimit) {
@ -462,7 +463,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
}
best_off = i - (ip == anchor);
do {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
mlen--;
@ -487,7 +488,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
best_mlen = matches[u].len;
while (mlen <= best_mlen) {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
mlen++;
@ -507,12 +508,12 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
if (opt[cur-1].mlen == 1) {
litlen = opt[cur-1].litlen + 1;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
} else
price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
} else {
litlen = 1;
price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
}
if (cur > last_pos || price <= opt[cur].price)
@ -554,12 +555,12 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
} else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
@ -586,12 +587,12 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen)
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
@ -645,13 +646,13 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
if (litLength==0) offset--;
}
ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
anchor = ip = ip + mlen;
} } /* for (cur=0; cur < last_pos; ) */
/* Save reps for next block */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
@ -666,6 +667,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
const void* src, size_t srcSize, const int ultra)
{
seqStore_t* seqStorePtr = &(ctx->seqStore);
optState_t* optStatePtr = &(ctx->optState);
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
@ -683,16 +685,16 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
const U32 mls = ctx->appliedParams.cParams.searchLength;
const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
ZSTD_match_t* matches = seqStorePtr->matchTable;
ZSTD_optimal_t* opt = optStatePtr->priceTable;
ZSTD_match_t* matches = optStatePtr->matchTable;
const BYTE* inr;
/* init */
U32 offset, rep[ZSTD_REP_NUM];
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
ctx->nextToUpdate3 = ctx->nextToUpdate;
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
ip += (ip==prefixStart);
/* Match Loop */
@ -726,7 +728,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
best_off = i - (ip==anchor);
litlen = opt[0].litlen;
do {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
mlen--;
@ -756,7 +758,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
best_mlen = matches[u].len;
litlen = opt[0].litlen;
while (mlen <= best_mlen) {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
mlen++;
@ -773,12 +775,12 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
if (opt[cur-1].mlen == 1) {
litlen = opt[cur-1].litlen + 1;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen);
} else
price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor);
} else {
litlen = 1;
price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1);
}
if (cur > last_pos || price <= opt[cur].price)
@ -826,12 +828,12 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
} else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
@ -858,12 +860,12 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen)
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
@ -918,13 +920,13 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
if (litLength==0) offset--;
}
ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH);
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
anchor = ip = ip + mlen;
} } /* for (cur=0; cur < last_pos; ) */
/* Save reps for next block */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->repToConfirm[i] = rep[i]; }
/* Last Literals */
{ size_t lastLLSize = iend - anchor;


@ -9,7 +9,8 @@
/* ====== Tuning parameters ====== */
#define ZSTDMT_NBTHREADS_MAX 128
#define ZSTDMT_NBTHREADS_MAX 256
#define ZSTDMT_OVERLAPLOG_DEFAULT 6
/* ====== Compiler specifics ====== */
@ -73,6 +74,7 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
/* ===== Buffer Pool ===== */
/* a single Buffer Pool can be invoked from multiple threads in parallel */
typedef struct buffer_s {
void* start;
@ -82,6 +84,8 @@ typedef struct buffer_s {
static const buffer_t g_nullBuffer = { NULL, 0 };
typedef struct ZSTDMT_bufferPool_s {
pthread_mutex_t poolMutex;
size_t bufferSize;
unsigned totalBuffers;
unsigned nbBuffers;
ZSTD_customMem cMem;
@ -90,10 +94,15 @@ typedef struct ZSTDMT_bufferPool_s {
static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads, ZSTD_customMem cMem)
{
unsigned const maxNbBuffers = 2*nbThreads + 2;
unsigned const maxNbBuffers = 2*nbThreads + 3;
ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
if (bufPool==NULL) return NULL;
if (pthread_mutex_init(&bufPool->poolMutex, NULL)) {
ZSTD_free(bufPool, cMem);
return NULL;
}
bufPool->bufferSize = 64 KB;
bufPool->totalBuffers = maxNbBuffers;
bufPool->nbBuffers = 0;
bufPool->cMem = cMem;
@ -106,6 +115,7 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
if (!bufPool) return; /* compatibility with free on NULL */
for (u=0; u<bufPool->totalBuffers; u++)
ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
pthread_mutex_destroy(&bufPool->poolMutex);
ZSTD_free(bufPool, bufPool->cMem);
}
@ -116,65 +126,85 @@ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
+ (bufPool->totalBuffers - 1) * sizeof(buffer_t);
unsigned u;
size_t totalBufferSize = 0;
pthread_mutex_lock(&bufPool->poolMutex);
for (u=0; u<bufPool->totalBuffers; u++)
totalBufferSize += bufPool->bTable[u].size;
pthread_mutex_unlock(&bufPool->poolMutex);
return poolSize + totalBufferSize;
}
/** ZSTDMT_getBuffer() :
* assumption : invocation from main thread only ! */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* pool, size_t bSize)
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* bufPool, size_t bSize)
{
if (pool->nbBuffers) { /* try to use an existing buffer */
buffer_t const buf = pool->bTable[--(pool->nbBuffers)];
bufPool->bufferSize = bSize;
}
/** ZSTDMT_getBuffer() :
* assumption : bufPool must be valid */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
size_t const bSize = bufPool->bufferSize;
DEBUGLOG(5, "ZSTDMT_getBuffer");
pthread_mutex_lock(&bufPool->poolMutex);
if (bufPool->nbBuffers) { /* try to use an existing buffer */
buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
size_t const availBufferSize = buf.size;
if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize))
if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize)) {
/* large enough, but not too much */
pthread_mutex_unlock(&bufPool->poolMutex);
return buf;
}
/* size conditions not respected : scratch this buffer, create new one */
ZSTD_free(buf.start, pool->cMem);
DEBUGLOG(5, "existing buffer does not meet size conditions => freeing");
ZSTD_free(buf.start, bufPool->cMem);
}
pthread_mutex_unlock(&bufPool->poolMutex);
/* create new buffer */
DEBUGLOG(5, "create a new buffer");
{ buffer_t buffer;
void* const start = ZSTD_malloc(bSize, pool->cMem);
if (start==NULL) bSize = 0;
void* const start = ZSTD_malloc(bSize, bufPool->cMem);
buffer.start = start; /* note : start can be NULL if malloc fails ! */
buffer.size = bSize;
buffer.size = (start==NULL) ? 0 : bSize;
return buffer;
}
}
/* store buffer for later re-use, up to pool capacity */
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf)
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
if (buf.start == NULL) return; /* release on NULL */
if (pool->nbBuffers < pool->totalBuffers) {
pool->bTable[pool->nbBuffers++] = buf; /* store for later re-use */
if (buf.start == NULL) return; /* compatible with release on NULL */
DEBUGLOG(5, "ZSTDMT_releaseBuffer");
pthread_mutex_lock(&bufPool->poolMutex);
if (bufPool->nbBuffers < bufPool->totalBuffers) {
bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */
pthread_mutex_unlock(&bufPool->poolMutex);
return;
}
pthread_mutex_unlock(&bufPool->poolMutex);
/* Reached bufferPool capacity (should not happen) */
ZSTD_free(buf.start, pool->cMem);
DEBUGLOG(5, "buffer pool capacity reached => freeing ");
ZSTD_free(buf.start, bufPool->cMem);
}
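From here on both pools may be hit by worker threads, so every access is bracketed by poolMutex and the actual malloc/free happens outside the lock. A minimal standalone rendering of the same pattern (illustrative names; the size-fit checks above are omitted):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct { void* start; size_t size; } buf_t;
    typedef struct { pthread_mutex_t lock; buf_t table[8]; unsigned count; } pool_t;

    static buf_t pool_get(pool_t* p, size_t bSize)
    {
        buf_t b = { NULL, 0 };
        pthread_mutex_lock(&p->lock);
        if (p->count) b = p->table[--p->count];   /* reuse a cached buffer */
        pthread_mutex_unlock(&p->lock);           /* allocate outside the lock */
        if (b.start == NULL) {
            b.start = malloc(bSize);
            b.size  = b.start ? bSize : 0;
        }
        return b;
    }

    static void pool_release(pool_t* p, buf_t b)
    {
        if (b.start == NULL) return;
        pthread_mutex_lock(&p->lock);
        if (p->count < 8) { p->table[p->count++] = b; b.start = NULL; }
        pthread_mutex_unlock(&p->lock);
        free(b.start);                            /* only when the pool was full */
    }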
/* ===== CCtx Pool ===== */
/* a single CCtx Pool can be invoked from multiple threads in parallel */
typedef struct {
pthread_mutex_t poolMutex;
unsigned totalCCtx;
unsigned availCCtx;
ZSTD_customMem cMem;
ZSTD_CCtx* cctx[1]; /* variable size */
} ZSTDMT_CCtxPool;
/* assumption : CCtxPool invocation only from main thread */
/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
unsigned u;
for (u=0; u<pool->totalCCtx; u++)
ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */
pthread_mutex_destroy(&pool->poolMutex);
ZSTD_free(pool, pool->cMem);
}
@ -186,6 +216,10 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*), cMem);
if (!cctxPool) return NULL;
if (pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
ZSTD_free(cctxPool, cMem);
return NULL;
}
cctxPool->cMem = cMem;
cctxPool->totalCCtx = nbThreads;
cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */
@ -198,50 +232,57 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
unsigned const nbThreads = cctxPool->totalCCtx;
size_t const poolSize = sizeof(*cctxPool)
+ (nbThreads-1)*sizeof(ZSTD_CCtx*);
unsigned u;
size_t totalCCtxSize = 0;
for (u=0; u<nbThreads; u++)
totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
return poolSize + totalCCtxSize;
pthread_mutex_lock(&cctxPool->poolMutex);
{ unsigned const nbThreads = cctxPool->totalCCtx;
size_t const poolSize = sizeof(*cctxPool)
+ (nbThreads-1)*sizeof(ZSTD_CCtx*);
unsigned u;
size_t totalCCtxSize = 0;
for (u=0; u<nbThreads; u++) {
totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
}
pthread_mutex_unlock(&cctxPool->poolMutex);
return poolSize + totalCCtxSize;
}
}
static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* pool)
static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
{
if (pool->availCCtx) {
pool->availCCtx--;
return pool->cctx[pool->availCCtx];
}
return ZSTD_createCCtx(); /* note : can be NULL, when creation fails ! */
DEBUGLOG(5, "ZSTDMT_getCCtx");
pthread_mutex_lock(&cctxPool->poolMutex);
if (cctxPool->availCCtx) {
cctxPool->availCCtx--;
{ ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
pthread_mutex_unlock(&cctxPool->poolMutex);
return cctx;
} }
pthread_mutex_unlock(&cctxPool->poolMutex);
DEBUGLOG(5, "create one more CCtx");
return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */
}
static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
{
if (cctx==NULL) return; /* compatibility with release on NULL */
pthread_mutex_lock(&pool->poolMutex);
if (pool->availCCtx < pool->totalCCtx)
pool->cctx[pool->availCCtx++] = cctx;
else
else {
/* pool overflow : should not happen, since totalCCtx==nbThreads */
DEBUGLOG(5, "CCtx pool overflow : free cctx");
ZSTD_freeCCtx(cctx);
}
pthread_mutex_unlock(&pool->poolMutex);
}
/* ===== Thread worker ===== */
typedef struct {
buffer_t buffer;
size_t filled;
} inBuff_t;
typedef struct {
ZSTD_CCtx* cctx;
buffer_t src;
const void* srcStart;
size_t srcSize;
size_t dictSize;
size_t srcSize;
buffer_t dstBuff;
size_t cSize;
size_t dstFlushed;
@ -253,6 +294,8 @@ typedef struct {
pthread_cond_t* jobCompleted_cond;
ZSTD_parameters params;
const ZSTD_CDict* cdict;
ZSTDMT_CCtxPool* cctxPool;
ZSTDMT_bufferPool* bufPool;
unsigned long long fullFrameSize;
} ZSTDMT_jobDescription;
@ -260,37 +303,56 @@ typedef struct {
void ZSTDMT_compressChunk(void* jobDescription)
{
ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
ZSTD_CCtx* cctx = ZSTDMT_getCCtx(job->cctxPool);
const void* const src = (const char*)job->srcStart + job->dictSize;
buffer_t const dstBuff = job->dstBuff;
buffer_t dstBuff = job->dstBuff;
DEBUGLOG(5, "job (first:%u) (last:%u) : dictSize %u, srcSize %u",
job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize);
if (cctx==NULL) {
job->cSize = ERROR(memory_allocation);
goto _endJob;
}
if (dstBuff.start == NULL) {
dstBuff = ZSTDMT_getBuffer(job->bufPool);
if (dstBuff.start==NULL) {
job->cSize = ERROR(memory_allocation);
goto _endJob;
}
job->dstBuff = dstBuff;
}
if (job->cdict) { /* should only happen for first segment */
size_t const initError = ZSTD_compressBegin_usingCDict_advanced(job->cctx, job->cdict, job->params.fParams, job->fullFrameSize);
size_t const initError = ZSTD_compressBegin_usingCDict_advanced(cctx, job->cdict, job->params.fParams, job->fullFrameSize);
DEBUGLOG(5, "using CDict");
if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
} else { /* srcStart points at reloaded section */
if (!job->firstChunk) job->params.fParams.contentSizeFlag = 0; /* ensure no srcSize control */
{ size_t const dictModeError = ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceRawDict, 1); /* Force loading dictionary in "content-only" mode (no header analysis) */
size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
{ size_t const dictModeError = ZSTD_setCCtxParameter(cctx, ZSTD_p_forceRawDict, 1); /* Force loading dictionary in "content-only" mode (no header analysis) */
size_t const initError = ZSTD_compressBegin_advanced(cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
if (ZSTD_isError(initError) || ZSTD_isError(dictModeError)) { job->cSize = initError; goto _endJob; }
ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1);
ZSTD_setCCtxParameter(cctx, ZSTD_p_forceWindow, 1);
} }
if (!job->firstChunk) { /* flush and overwrite frame header when it's not first segment */
size_t const hSize = ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, 0);
size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, 0);
if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; }
ZSTD_invalidateRepCodes(job->cctx);
ZSTD_invalidateRepCodes(cctx);
}
DEBUGLOG(5, "Compressing : ");
DEBUG_PRINTHEX(4, job->srcStart, 12);
job->cSize = (job->lastChunk) ?
ZSTD_compressEnd (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
ZSTD_compressEnd (cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
DEBUGLOG(5, "compressed %u bytes into %u bytes (first:%u) (last:%u)",
(unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
DEBUGLOG(5, "dstBuff.size : %u ; => %s", (U32)dstBuff.size, ZSTD_getErrorName(job->cSize));
_endJob:
ZSTDMT_releaseCCtx(job->cctxPool, cctx);
ZSTDMT_releaseBuffer(job->bufPool, job->src);
job->src = g_nullBuffer; job->srcStart = NULL;
PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
job->jobCompleted = 1;
job->jobScanned = 0;
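Taken together, this hunk moves the worker from consuming resources attached by the main thread to pulling them from the thread-safe pools itself. A condensed sketch of the new flow in the diff's own names (error paths elided; not the literal function):

    static void compressChunk_flow(ZSTDMT_jobDescription* job)
    {
        ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);  /* may create one */
        buffer_t dstBuff = job->dstBuff;
        if (dstBuff.start == NULL) {                            /* streaming jobs */
            dstBuff = ZSTDMT_getBuffer(job->bufPool);           /* pooled output */
            job->dstBuff = dstBuff;
        }
        /* ... begin / continue / end compression into dstBuff ... */
        ZSTDMT_releaseCCtx(job->cctxPool, cctx);                /* back to pool */
        ZSTDMT_releaseBuffer(job->bufPool, job->src);           /* input done */
    }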
@ -303,15 +365,19 @@ _endJob:
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */
typedef struct {
buffer_t buffer;
size_t filled;
} inBuff_t;
struct ZSTDMT_CCtx_s {
POOL_ctx* factory;
ZSTDMT_jobDescription* jobs;
ZSTDMT_bufferPool* buffPool;
ZSTDMT_bufferPool* bufPool;
ZSTDMT_CCtxPool* cctxPool;
pthread_mutex_t jobCompleted_mutex;
pthread_cond_t jobCompleted_cond;
size_t targetSectionSize;
size_t marginSize;
size_t inBuffSize;
size_t dictSize;
size_t targetDictSize;
@ -324,7 +390,7 @@ struct ZSTDMT_CCtx_s {
unsigned nextJobID;
unsigned frameEnded;
unsigned allJobsCompleted;
unsigned overlapRLog;
unsigned overlapLog;
unsigned long long frameContentSize;
size_t sectionSize;
ZSTD_customMem cMem;
@ -347,7 +413,8 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
U32 nbJobs = nbThreads + 2;
DEBUGLOG(3, "ZSTDMT_createCCtx_advanced");
if ((nbThreads < 1) | (nbThreads > ZSTDMT_NBTHREADS_MAX)) return NULL;
if (nbThreads < 1) return NULL;
nbThreads = MIN(nbThreads , ZSTDMT_NBTHREADS_MAX);
if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
/* invalid custom allocator */
return NULL;
@ -358,18 +425,24 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem)
mtctx->nbThreads = nbThreads;
mtctx->allJobsCompleted = 1;
mtctx->sectionSize = 0;
mtctx->overlapRLog = 3;
mtctx->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
mtctx->factory = POOL_create(nbThreads, 1);
mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, cMem);
mtctx->jobIDMask = nbJobs - 1;
mtctx->buffPool = ZSTDMT_createBufferPool(nbThreads, cMem);
mtctx->bufPool = ZSTDMT_createBufferPool(nbThreads, cMem);
mtctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads, cMem);
if (!mtctx->factory | !mtctx->jobs | !mtctx->buffPool | !mtctx->cctxPool) {
if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool) {
ZSTDMT_freeCCtx(mtctx);
return NULL;
}
if (pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL)) {
ZSTDMT_freeCCtx(mtctx);
return NULL;
}
if (pthread_cond_init(&mtctx->jobCompleted_cond, NULL)) {
ZSTDMT_freeCCtx(mtctx);
return NULL;
}
pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL); /* Todo : check init function return */
pthread_cond_init(&mtctx->jobCompleted_cond, NULL);
DEBUGLOG(3, "mt_cctx created, for %u threads", nbThreads);
return mtctx;
}
@ -386,15 +459,13 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
unsigned jobID;
DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].dstBuff);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
mtctx->jobs[jobID].dstBuff = g_nullBuffer;
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].src);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].src);
mtctx->jobs[jobID].src = g_nullBuffer;
ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[jobID].cctx);
mtctx->jobs[jobID].cctx = NULL;
}
memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer);
mtctx->inBuff.buffer = g_nullBuffer;
mtctx->allJobsCompleted = 1;
}
@ -404,7 +475,7 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
if (mtctx==NULL) return 0; /* compatible with free on NULL */
POOL_free(mtctx->factory);
if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */
ZSTDMT_freeBufferPool(mtctx->buffPool); /* release job resources into pools first */
ZSTDMT_freeBufferPool(mtctx->bufPool); /* release job resources into pools first */
ZSTD_free(mtctx->jobs, mtctx->cMem);
ZSTDMT_freeCCtxPool(mtctx->cctxPool);
ZSTD_freeCDict(mtctx->cdictLocal);
@ -418,11 +489,11 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
{
if (mtctx == NULL) return 0; /* supports sizeof NULL */
return sizeof(*mtctx)
+ POOL_sizeof(mtctx->factory)
+ ZSTDMT_sizeof_bufferPool(mtctx->buffPool)
+ (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
+ ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
+ ZSTD_sizeof_CDict(mtctx->cdictLocal);
+ POOL_sizeof(mtctx->factory)
+ ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
+ (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
+ ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
+ ZSTD_sizeof_CDict(mtctx->cdictLocal);
}
size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value)
@ -434,10 +505,10 @@ size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter,
return 0;
case ZSTDMT_p_overlapSectionLog :
DEBUGLOG(5, "ZSTDMT_p_overlapSectionLog : %u", value);
mtctx->overlapRLog = (value >= 9) ? 0 : 9 - value;
mtctx->overlapLog = (value >= 9) ? 9 : value;
return 0;
default :
return ERROR(compressionParameter_unsupported);
return ERROR(parameter_unsupported);
}
}
@ -459,12 +530,13 @@ static unsigned computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbT
size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters const params,
unsigned overlapRLog)
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters const params,
unsigned overlapLog)
{
unsigned const overlapRLog = (overlapLog>9) ? 0 : 9-overlapLog;
size_t const overlapSize = (overlapRLog>=9) ? 0 : (size_t)1 << (params.cParams.windowLog - overlapRLog);
unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, mtctx->nbThreads);
size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
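The public knob flips meaning here: overlapRLog counted how much of the window to drop, while overlapLog (0 = no overlap, 9 = full window) counts how much to keep, matching the ZSTDMT_p_overlapSectionLog change above. A worked example under assumed values:

    unsigned const overlapLog  = 6;    /* ZSTDMT_OVERLAPLOG_DEFAULT */
    unsigned const windowLog   = 23;   /* assumed : 8 MB window */
    unsigned const overlapRLog = (overlapLog > 9) ? 0 : 9 - overlapLog;     /* 3 */
    size_t   const overlapSize = (overlapRLog >= 9) ? 0
                               : (size_t)1 << (windowLog - overlapRLog);    /* 1 MB */
    /* each chunk after the first reloads 1/8 of the window as its dictionary */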
@ -473,6 +545,7 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
size_t remainingSrcSize = srcSize;
unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize)); /* presumes avgChunkSize >= 256 KB, which should be the case */
size_t frameStartPos = 0, dstBufferPos = 0;
XXH64_state_t xxh64;
DEBUGLOG(4, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize);
if (nbChunks==1) { /* fallback to single-thread mode */
@ -480,7 +553,9 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, params.fParams);
return ZSTD_compress_advanced(cctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
}
assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is useful to avoid allocating extra buffers */
assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is required for compressWithinDst */
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgChunkSize) );
XXH64_reset(&xxh64, 0);
if (nbChunks > mtctx->jobIDMask+1) { /* enlarge job table */
U32 nbJobs = nbChunks;
@ -496,17 +571,10 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize);
size_t const dstBufferCapacity = ZSTD_compressBound(chunkSize);
buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity);
ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool);
buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
size_t dictSize = u ? overlapSize : 0;
if ((cctx==NULL) || (dstBuffer.start==NULL)) {
mtctx->jobs[u].cSize = ERROR(memory_allocation); /* job result */
mtctx->jobs[u].jobCompleted = 1;
nbChunks = u+1; /* only wait and free u jobs, instead of initially expected nbChunks ones */
break; /* let's wait for previous jobs to complete, but don't start new ones */
}
mtctx->jobs[u].src = g_nullBuffer;
mtctx->jobs[u].srcStart = srcStart + frameStartPos - dictSize;
mtctx->jobs[u].dictSize = dictSize;
mtctx->jobs[u].srcSize = chunkSize;
@ -516,13 +584,18 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
/* do not calculate checksum within sections, but write it in header for first section */
if (u!=0) mtctx->jobs[u].params.fParams.checksumFlag = 0;
mtctx->jobs[u].dstBuff = dstBuffer;
mtctx->jobs[u].cctx = cctx;
mtctx->jobs[u].cctxPool = mtctx->cctxPool;
mtctx->jobs[u].bufPool = mtctx->bufPool;
mtctx->jobs[u].firstChunk = (u==0);
mtctx->jobs[u].lastChunk = (u==nbChunks-1);
mtctx->jobs[u].jobCompleted = 0;
mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex;
mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond;
if (params.fParams.checksumFlag) {
XXH64_update(&xxh64, srcStart + frameStartPos, chunkSize);
}
DEBUGLOG(5, "posting job %u (%u bytes)", u, (U32)chunkSize);
DEBUG_PRINTHEX(6, mtctx->jobs[u].srcStart, 12);
POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);
@ -533,8 +606,8 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
} }
/* collect result */
{ unsigned chunkID;
size_t error = 0, dstPos = 0;
{ size_t error = 0, dstPos = 0;
unsigned chunkID;
for (chunkID=0; chunkID<nbChunks; chunkID++) {
DEBUGLOG(5, "waiting for chunk %u ", chunkID);
PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
@ -545,8 +618,6 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
DEBUGLOG(5, "ready to write chunk %u ", chunkID);
ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx);
mtctx->jobs[chunkID].cctx = NULL;
mtctx->jobs[chunkID].srcStart = NULL;
{ size_t const cSize = mtctx->jobs[chunkID].cSize;
if (ZSTD_isError(cSize)) error = cSize;
@ -556,13 +627,25 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize); /* may overlap when chunk compressed within dst */
if (chunkID >= compressWithinDst) { /* chunk compressed into its own buffer, which must be released */
DEBUGLOG(5, "releasing buffer %u>=%u", chunkID, compressWithinDst);
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[chunkID].dstBuff);
}
mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
}
dstPos += cSize ;
}
}
} /* for (chunkID=0; chunkID<nbChunks; chunkID++) */
DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
if (params.fParams.checksumFlag) {
U32 const checksum = (U32)XXH64_digest(&xxh64);
if (dstPos + 4 > dstCapacity) {
error = ERROR(dstSize_tooSmall);
} else {
DEBUGLOG(4, "writing checksum : %08X \n", checksum);
MEM_writeLE32((char*)dst + dstPos, checksum);
dstPos += 4;
} }
if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos);
return error ? error : dstPos;
}
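/* A condensed sketch of the checksum path above (all names from this file) :
 * the frame checksum is the low 32 bits of the XXH64 (seed 0) of the
 * *uncompressed* input, appended little-endian after the last chunk. */
XXH64_state_t xxh64;
XXH64_reset(&xxh64, 0);
XXH64_update(&xxh64, src, srcSize);   /* fed chunk by chunk above */
{   U32 const checksum = (U32)XXH64_digest(&xxh64);
    MEM_writeLE32((char*)dst + dstPos, checksum);
    dstPos += 4;
}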
@ -574,10 +657,10 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
const void* src, size_t srcSize,
int compressionLevel)
{
U32 const overlapRLog = (compressionLevel >= ZSTD_maxCLevel()) ? 0 : 3;
U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 9 : ZSTDMT_OVERLAPLOG_DEFAULT;
ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
params.fParams.contentSizeFlag = 1;
return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapRLog);
return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
}
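/* A small sketch of the overlapRLog -> overlapLog convention switch above
 * (the numeric value of ZSTDMT_OVERLAPLOG_DEFAULT is not shown in this
 * diff) : overlapLog==0 disables overlap, overlapLog==9 reloads a full
 * window, and in general the overlap is windowSize >> (9 - overlapLog). */
static size_t ZSTDMT_overlapSize_sketch(unsigned windowLog, unsigned overlapLog)
{
    if (overlapLog == 0) return 0;                       /* no overlap */
    return (size_t)1 << (windowLog - (9 - overlapLog));  /* 9 => full window */
}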
@ -615,8 +698,8 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
if (zcs->nbThreads==1) {
DEBUGLOG(4, "single thread mode");
return ZSTD_initCStream_internal(zcs->cctxPool->cctx[0],
dict, dictSize, cdict,
params, pledgedSrcSize);
dict, dictSize, cdict,
params, pledgedSrcSize);
}
if (zcs->allJobsCompleted == 0) { /* previous compression not correctly finished */
@ -642,18 +725,16 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
zcs->cdict = cdict;
}
zcs->targetDictSize = (zcs->overlapRLog>=9) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - zcs->overlapRLog);
DEBUGLOG(4, "overlapRLog : %u ", zcs->overlapRLog);
zcs->targetDictSize = (zcs->overlapLog==0) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - (9 - zcs->overlapLog));
DEBUGLOG(4, "overlapLog : %u ", zcs->overlapLog);
DEBUGLOG(4, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10));
zcs->targetSectionSize = zcs->sectionSize ? zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2);
zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize);
zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize);
DEBUGLOG(4, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10));
zcs->marginSize = zcs->targetSectionSize >> 2;
zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize + zcs->marginSize;
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
if (zcs->inBuff.buffer.start == NULL) return ERROR(memory_allocation);
zcs->inBuff.filled = 0;
zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize;
ZSTDMT_setBufferSize(zcs->bufPool, MAX(zcs->inBuffSize, ZSTD_compressBound(zcs->targetSectionSize)) );
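/* sizing rationale implied above : one shared buffer pool now serves both
 * input buffers (overlap + one section of source) and output buffers
 * (worst case for one compressed section), hence the single size
 * MAX(targetDictSize + targetSectionSize, ZSTD_compressBound(targetSectionSize)) */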
zcs->inBuff.buffer = g_nullBuffer;
zcs->dictSize = 0;
zcs->doneJobID = 0;
zcs->nextJobID = 0;
@ -664,8 +745,9 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
}
size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
const void* dict, size_t dictSize,
ZSTD_parameters params,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(5, "ZSTDMT_initCStream_advanced");
return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, NULL, params, pledgedSrcSize);
@ -701,19 +783,8 @@ size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) {
static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsigned endFrame)
{
size_t const dstBufferCapacity = ZSTD_compressBound(srcSize);
buffer_t const dstBuffer = ZSTDMT_getBuffer(zcs->buffPool, dstBufferCapacity);
ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(zcs->cctxPool);
unsigned const jobID = zcs->nextJobID & zcs->jobIDMask;
if ((cctx==NULL) || (dstBuffer.start==NULL)) {
zcs->jobs[jobID].jobCompleted = 1;
zcs->nextJobID++;
ZSTDMT_waitForAllJobsCompleted(zcs);
ZSTDMT_releaseAllJobResources(zcs);
return ERROR(memory_allocation);
}
DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ",
zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize);
zcs->jobs[jobID].src = zcs->inBuff.buffer;
@ -726,8 +797,9 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0;
zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? zcs->cdict : NULL;
zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize;
zcs->jobs[jobID].dstBuff = dstBuffer;
zcs->jobs[jobID].cctx = cctx;
zcs->jobs[jobID].dstBuff = g_nullBuffer;
zcs->jobs[jobID].cctxPool = zcs->cctxPool;
zcs->jobs[jobID].bufPool = zcs->bufPool;
zcs->jobs[jobID].firstChunk = (zcs->nextJobID==0);
zcs->jobs[jobID].lastChunk = endFrame;
zcs->jobs[jobID].jobCompleted = 0;
@ -735,11 +807,13 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
zcs->jobs[jobID].jobCompleted_mutex = &zcs->jobCompleted_mutex;
zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond;
if (zcs->params.fParams.checksumFlag)
XXH64_update(&zcs->xxhState, (const char*)zcs->inBuff.buffer.start + zcs->dictSize, srcSize);
/* get a new buffer for next input */
if (!endFrame) {
size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize);
DEBUGLOG(5, "ZSTDMT_createCompressionJob::endFrame = %u", endFrame);
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->bufPool);
if (zcs->inBuff.buffer.start == NULL) { /* not enough memory to allocate next input buffer */
zcs->jobs[jobID].jobCompleted = 1;
zcs->nextJobID++;
@ -747,26 +821,20 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsi
ZSTDMT_releaseAllJobResources(zcs);
return ERROR(memory_allocation);
}
DEBUGLOG(5, "inBuff currently filled to %u", (U32)zcs->inBuff.filled);
zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize;
DEBUGLOG(5, "new job : inBuff filled to %u, with %u dict and %u src",
(U32)zcs->inBuff.filled, (U32)newDictSize,
(U32)(zcs->inBuff.filled - newDictSize));
memmove(zcs->inBuff.buffer.start,
(const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize,
zcs->inBuff.filled);
DEBUGLOG(5, "new inBuff pre-filled");
zcs->dictSize = newDictSize;
} else { /* if (endFrame==1) */
DEBUGLOG(5, "ZSTDMT_createCompressionJob::endFrame = %u", endFrame);
zcs->inBuff.buffer = g_nullBuffer;
zcs->inBuff.filled = 0;
zcs->dictSize = 0;
zcs->frameEnded = 1;
if (zcs->nextJobID == 0)
if (zcs->nextJobID == 0) {
/* single chunk exception : checksum is calculated directly within worker thread */
zcs->params.fParams.checksumFlag = 0;
}
} }
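/* (rationale assumed for the exception just above : a lone chunk is
 * compressed as one complete frame by its worker, which appends the frame
 * checksum itself, so the streaming layer must not add a second one) */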
DEBUGLOG(4, "posting job %u : %u bytes (end:%u) (note : doneJob = %u=>%u)",
zcs->nextJobID,
@ -804,11 +872,8 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
ZSTDMT_releaseAllJobResources(zcs);
return job.cSize;
}
ZSTDMT_releaseCCtx(zcs->cctxPool, job.cctx);
zcs->jobs[wJobID].cctx = NULL;
DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag);
if (zcs->params.fParams.checksumFlag) {
XXH64_update(&zcs->xxhState, (const char*)job.srcStart + job.dictSize, job.srcSize);
if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) { /* write checksum at end of last section */
U32 const checksum = (U32)XXH64_digest(&zcs->xxhState);
DEBUGLOG(5, "writing checksum : %08X \n", checksum);
@ -816,9 +881,6 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
job.cSize += 4;
zcs->jobs[wJobID].cSize += 4;
} }
ZSTDMT_releaseBuffer(zcs->buffPool, job.src);
zcs->jobs[wJobID].srcStart = NULL;
zcs->jobs[wJobID].src = g_nullBuffer;
zcs->jobs[wJobID].jobScanned = 1;
}
{ size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos);
@ -828,7 +890,7 @@ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsi
job.dstFlushed += toWrite;
}
if (job.dstFlushed == job.cSize) { /* output buffer fully flushed => move to next one */
ZSTDMT_releaseBuffer(zcs->buffPool, job.dstBuff);
ZSTDMT_releaseBuffer(zcs->bufPool, job.dstBuff);
zcs->jobs[wJobID].dstBuff = g_nullBuffer;
zcs->jobs[wJobID].jobCompleted = 0;
zcs->doneJobID++;
@ -852,18 +914,18 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp)
{
size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize + mtctx->marginSize;
size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize;
assert(output->pos <= output->size);
assert(input->pos <= input->size);
if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
/* current frame being ended. Only flush/end are allowed. Or start new frame with init */
return ERROR(stage_wrong);
}
if (mtctx->nbThreads==1) {
if (mtctx->nbThreads==1) { /* delegate to single-thread (synchronous) */
return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
}
/* single-pass shortcut (note : this is blocking-mode) */
/* single-pass shortcut (note : this is synchronous-mode) */
if ( (mtctx->nextJobID==0) /* just started */
&& (mtctx->inBuff.filled==0) /* nothing buffered */
&& (endOp==ZSTD_e_end) /* end order */
@ -871,24 +933,29 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
size_t const cSize = ZSTDMT_compress_advanced(mtctx,
(char*)output->dst + output->pos, output->size - output->pos,
(const char*)input->src + input->pos, input->size - input->pos,
mtctx->cdict, mtctx->params, mtctx->overlapRLog);
mtctx->cdict, mtctx->params, mtctx->overlapLog);
if (ZSTD_isError(cSize)) return cSize;
input->pos = input->size;
output->pos += cSize;
ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer); /* was allocated in initStream */
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer); /* was allocated in initStream */
mtctx->allJobsCompleted = 1;
mtctx->frameEnded = 1;
return 0;
}
/* fill input buffer */
if ((input->src) && (mtctx->inBuff.buffer.start)) { /* support NULL input */
size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
DEBUGLOG(2, "inBuff:%08X; inBuffSize=%u; ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad);
memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
input->pos += toLoad;
mtctx->inBuff.filled += toLoad;
}
if (input->size > input->pos) { /* support NULL input */
if (mtctx->inBuff.buffer.start == NULL) {
mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool);
if (mtctx->inBuff.buffer.start == NULL) return ERROR(memory_allocation);
mtctx->inBuff.filled = 0;
}
{ size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled);
DEBUGLOG(5, "inBuff:%08X; inBuffSize=%u; ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad);
memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
input->pos += toLoad;
mtctx->inBuff.filled += toLoad;
} }
if ( (mtctx->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */
&& (mtctx->nextJobID <= mtctx->doneJobID + mtctx->jobIDMask) ) { /* avoid overwriting job round buffer */

View File

@ -16,8 +16,8 @@
/* Note : This is an internal API.
* Some methods are still exposed (ZSTDLIB_API), because for some time,
* it used to be the only way to invoke MT compression.
* Some methods are still exposed (ZSTDLIB_API),
* because it used to be the only way to invoke MT compression.
* Now, it's recommended to use ZSTD_compress_generic() instead.
* These methods will stop being exposed in a future version */
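/* A minimal sketch of the recommended replacement mentioned above, using
 * ZSTD_CCtx_setParameter() + ZSTD_compress_generic() as exercised by the
 * tests in this patch (src/dst are the caller's buffers; error handling
 * elided for brevity) : */
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 3);
ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, 4);   /* MT without any ZSTDMT_* call */
{   ZSTD_inBuffer in = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    while (ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end)) {}   /* 0 => frame done */
}
ZSTD_freeCCtx(cctx);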
@ -68,7 +68,7 @@ ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters const params,
unsigned overlapRLog);
unsigned overlapLog);
ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */

View File

@ -95,7 +95,7 @@ typedef struct {
HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
U32 rep[ZSTD_REP_NUM];
} ZSTD_entropyTables_t;
} ZSTD_entropyDTables_t;
struct ZSTD_DCtx_s
{
@ -103,7 +103,7 @@ struct ZSTD_DCtx_s
const FSE_DTable* MLTptr;
const FSE_DTable* OFTptr;
const HUF_DTable* HUFptr;
ZSTD_entropyTables_t entropy;
ZSTD_entropyDTables_t entropy;
const void* previousDstEnd; /* detect continuity */
const void* base; /* start of current segment */
const void* vBase; /* virtual start of previous segment if it was just before current one */
@ -1842,7 +1842,7 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict
/* ZSTD_loadEntropy() :
* dict : must point at beginning of a valid zstd dictionary
* @return : size of entropy tables read */
static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dict, size_t const dictSize)
static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
@ -1933,7 +1933,7 @@ struct ZSTD_DDict_s {
void* dictBuffer;
const void* dictContent;
size_t dictSize;
ZSTD_entropyTables_t entropy;
ZSTD_entropyDTables_t entropy;
U32 dictID;
U32 entropyPresent;
ZSTD_customMem cMem;
@ -2227,7 +2227,7 @@ size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds,
{
switch(paramType)
{
default : return ERROR(parameter_unknown);
default : return ERROR(parameter_unsupported);
case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
}
return 0;
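/* usage sketch for the lone parameter handled above (the limit value is
 * illustrative) : cap the window a frame may require to 128 MB. */
ZSTD_DStream* const zds = ZSTD_createDStream();
size_t const err = ZSTD_setDStreamParameter(zds, DStream_p_maxWindowSize, 1U << 27);
if (ZSTD_isError(err)) { /* unknown params now report parameter_unsupported */ }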

View File

@ -714,11 +714,9 @@ typedef struct COVER_best_s {
* Initialize the `COVER_best_t`.
*/
static void COVER_best_init(COVER_best_t *best) {
if (!best) {
return;
}
pthread_mutex_init(&best->mutex, NULL);
pthread_cond_init(&best->cond, NULL);
if (best==NULL) return; /* compatible with init on NULL */
(void)pthread_mutex_init(&best->mutex, NULL);
(void)pthread_cond_init(&best->cond, NULL);
best->liveJobs = 0;
best->dict = NULL;
best->dictSize = 0;

View File

@ -695,7 +695,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
DISPLAYLEVEL(1, "Not enough memory \n");
goto _cleanup;
}
if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionary_wrong); goto _cleanup; } /* too large dictionary */
if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */
for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */
for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;

View File

@ -2776,7 +2776,7 @@ static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_
size_t result;
if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);
result = ZSTD_getFrameParams(&(zc->params), src, srcSize);
if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupportedBy32bits);
if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}

View File

@ -2888,7 +2888,7 @@ static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src,
if (srcSize != zc->headerSize)
return ERROR(srcSize_wrong);
result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize);
if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupportedBy32bits);
if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}

View File

@ -3084,7 +3084,7 @@ size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src,
static size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize)
{
size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize);
if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupportedBy32bits);
if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}

View File

@ -954,7 +954,9 @@ typedef enum {
* Special: value 0 means "do not change strategy". */
/* frame parameters */
ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1) */
ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1)
* note that content size must be known at the start of compression;
* it is provided using ZSTD_CCtx_setPledgedSrcSize() */
ZSTD_p_checksumFlag, /* A 32-bits checksum of content is written at end of frame (default:0) */
ZSTD_p_dictIDFlag, /* When applicable, dictID of dictionary is provided in frame header (default:1) */
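/* sketch of the note added above (cctx and totalSrcSize are illustrative) :
 * with contentSizeFlag set, the frame header can only carry a size the
 * caller pledges before compression starts. */
ZSTD_CCtx_setParameter(cctx, ZSTD_p_contentSizeFlag, 1);
ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)totalSrcSize);
/* streaming a different total is then reported as an error when the frame is ended */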

View File

@ -1,5 +1,5 @@
.
.TH "ZSTD" "1" "June 2017" "zstd 1.3.0" "User Commands"
.TH "ZSTD" "1" "July 2017" "zstd 1.3.1" "User Commands"
.
.SH "NAME"
\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
@ -105,7 +105,7 @@ unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note
.
.TP
\fB\-T#\fR, \fB\-\-threads=#\fR
Compress using \fB#\fR threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\.
Compress using \fB#\fR threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the number of threads is capped at ZSTDMT_NBTHREADS_MAX==256\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\.
.
.TP
\fB\-D file\fR

View File

@ -108,6 +108,7 @@ the last one takes effect.
* `-T#`, `--threads=#`:
Compress using `#` threads (default: 1).
If `#` is 0, attempt to detect and use the number of physical CPU cores.
In all cases, the number of threads is capped at ZSTDMT_NBTHREADS_MAX==256.
This modifier does nothing if `zstd` is compiled without multithread support.
* `-D file`:
use `file` as Dictionary to compress or decompress FILE(s)

View File

@ -719,6 +719,10 @@ int main(int argCount, const char* argv[])
goto _end;
}
#ifndef ZSTD_NODECOMPRESS
if (operation==zom_test) { outFileName=nulmark; FIO_setRemoveSrcFile(0); } /* test mode */
#endif
/* No input filename ==> use stdin and stdout */
filenameIdx += !filenameIdx; /* filenameTable[0] is stdin by default */
if (!strcmp(filenameTable[0], stdinmark) && !outFileName) outFileName = stdoutmark; /* when input is stdin, default output is stdout */
@ -763,7 +767,6 @@ int main(int argCount, const char* argv[])
#endif
} else { /* decompression or test */
#ifndef ZSTD_NODECOMPRESS
if (operation==zom_test) { outFileName=nulmark; FIO_setRemoveSrcFile(0); } /* test mode */
FIO_setMemLimit(memLimit);
if (filenameIdx==1 && outFileName)
operationResult = FIO_decompressFilename(outFileName, filenameTable[0], dictFileName);

View File

@ -79,7 +79,7 @@ all32: fullbench32 fuzzer32 zstreamtest32
allnothread: fullbench fuzzer paramgrill datagen decodecorpus
dll: fuzzer-dll zstreamtest-dll
dll: fuzzer-dll zstreamtest-dll
zstd:
$(MAKE) -C $(PRGDIR) $@
@ -108,11 +108,11 @@ fullbench-dll: $(PRGDIR)/datagen.c fullbench.c
$(MAKE) -C $(ZSTDDIR) libzstd
$(CC) $(FLAGS) $^ -o $@$(EXT) -DZSTD_DLL_IMPORT=1 $(ZSTDDIR)/dll/libzstd.dll
fuzzer : $(ZSTD_FILES) $(ZDICT_FILES) $(PRGDIR)/datagen.c fuzzer.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
fuzzer32 : $(ZSTD_FILES) $(ZDICT_FILES) $(PRGDIR)/datagen.c fuzzer.c
$(CC) -m32 $(FLAGS) $^ -o $@$(EXT)
fuzzer : CPPFLAGS += $(MULTITHREAD_CPP)
fuzzer : LDFLAGS += $(MULTITHREAD_LD)
fuzzer32: CFLAGS += -m32
fuzzer fuzzer32 : $(ZSTD_FILES) $(ZDICT_FILES) $(PRGDIR)/datagen.c fuzzer.c
$(CC) $(FLAGS) $^ -o $@$(EXT)
fuzzer-dll : LDFLAGS+= -L$(ZSTDDIR) -lzstd
fuzzer-dll : $(ZSTDDIR)/common/xxhash.c $(PRGDIR)/datagen.c fuzzer.c
@ -192,7 +192,7 @@ else
$(CC) $(FLAGS) $^ -o $@$(EXT) -Wl,-rpath=$(ZSTDDIR) $(ZSTDDIR)/libzstd.so
endif
pool : pool.c $(ZSTDDIR)/common/pool.c $(ZSTDDIR)/common/threading.c
poolTests : poolTests.c $(ZSTDDIR)/common/pool.c $(ZSTDDIR)/common/threading.c
$(CC) $(FLAGS) $(MULTITHREAD) $^ -o $@$(EXT)
namespaceTest:
@ -213,7 +213,7 @@ clean:
fuzzer-dll$(EXT) zstreamtest-dll$(EXT) zbufftest-dll$(EXT)\
zstreamtest$(EXT) zstreamtest32$(EXT) \
datagen$(EXT) paramgrill$(EXT) roundTripCrash$(EXT) longmatch$(EXT) \
symbols$(EXT) invalidDictionaries$(EXT) legacy$(EXT) pool$(EXT) \
symbols$(EXT) invalidDictionaries$(EXT) legacy$(EXT) poolTests$(EXT) \
decodecorpus$(EXT)
@echo Cleaning completed
@ -375,7 +375,7 @@ test-decodecorpus-cli: decodecorpus
cd ..
@rm -rf testdir
test-pool: pool
$(QEMU_SYS) ./pool
test-pool: poolTests
$(QEMU_SYS) ./poolTests
endif

View File

@ -51,14 +51,14 @@ static const U32 nbTestsDefault = 30000;
/*-************************************
* Display Macros
**************************************/
#define DISPLAY(...) fprintf(stderr, __VA_ARGS__)
#define DISPLAY(...) fprintf(stdout, __VA_ARGS__)
#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
static U32 g_displayLevel = 2;
#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
if ((FUZ_clockSpan(g_displayClock) > g_refreshRate) || (g_displayLevel>=4)) \
{ g_displayClock = clock(); DISPLAY(__VA_ARGS__); \
if (g_displayLevel>=4) fflush(stderr); } }
if (g_displayLevel>=4) fflush(stdout); } }
static const clock_t g_refreshRate = CLOCKS_PER_SEC / 6;
static clock_t g_displayClock = 0;
@ -97,7 +97,165 @@ static unsigned FUZ_highbit32(U32 v32)
/*=============================================
* Basic Unit tests
* Memory Tests
=============================================*/
#if defined(__APPLE__) && defined(__MACH__)
#include <malloc/malloc.h> /* malloc_size */
typedef struct {
unsigned long long totalMalloc;
size_t currentMalloc;
size_t peakMalloc;
unsigned nbMalloc;
unsigned nbFree;
} mallocCounter_t;
static const mallocCounter_t INIT_MALLOC_COUNTER = { 0, 0, 0, 0, 0 };
static void* FUZ_mallocDebug(void* counter, size_t size)
{
mallocCounter_t* const mcPtr = (mallocCounter_t*)counter;
void* const ptr = malloc(size);
if (ptr==NULL) return NULL;
DISPLAYLEVEL(4, "allocating %u KB => effectively %u KB \n",
(U32)(size >> 10), (U32)(malloc_size(ptr) >> 10)); /* OS-X specific */
mcPtr->totalMalloc += size;
mcPtr->currentMalloc += size;
if (mcPtr->currentMalloc > mcPtr->peakMalloc)
mcPtr->peakMalloc = mcPtr->currentMalloc;
mcPtr->nbMalloc += 1;
return ptr;
}
static void FUZ_freeDebug(void* counter, void* address)
{
mallocCounter_t* const mcPtr = (mallocCounter_t*)counter;
DISPLAYLEVEL(4, "freeing %u KB \n", (U32)(malloc_size(address) >> 10));
mcPtr->nbFree += 1;
mcPtr->currentMalloc -= malloc_size(address); /* OS-X specific */
free(address);
}
static void FUZ_displayMallocStats(mallocCounter_t count)
{
DISPLAYLEVEL(3, "peak:%6u KB, nbMallocs:%2u, total:%6u KB \n",
(U32)(count.peakMalloc >> 10),
count.nbMalloc,
(U32)(count.totalMalloc >> 10));
}
#define CHECK_Z(f) { \
size_t const err = f; \
if (ZSTD_isError(err)) { \
DISPLAY("Error => %s : %s ", \
#f, ZSTD_getErrorName(err)); \
exit(1); \
} }
static int FUZ_mallocTests(unsigned seed, double compressibility, unsigned part)
{
size_t const inSize = 64 MB + 16 MB + 4 MB + 1 MB + 256 KB + 64 KB; /* 85.3 MB */
size_t const outSize = ZSTD_compressBound(inSize);
void* const inBuffer = malloc(inSize);
void* const outBuffer = malloc(outSize);
/* these tests are only run in verbose mode, as they are long */
if (g_displayLevel<3) return 0;
/* Create compressible noise */
if (!inBuffer || !outBuffer) {
DISPLAY("Not enough memory, aborting\n");
exit(1);
}
RDG_genBuffer(inBuffer, inSize, compressibility, 0. /*auto*/, seed);
/* simple compression tests */
if (part <= 1)
{ int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
CHECK_Z( ZSTD_compressCCtx(cctx, outBuffer, outSize, inBuffer, inSize, compressionLevel) );
ZSTD_freeCCtx(cctx);
DISPLAYLEVEL(3, "compressCCtx level %i : ", compressionLevel);
FUZ_displayMallocStats(malcount);
} }
/* streaming compression tests */
if (part <= 2)
{ int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cstream = ZSTD_createCStream_advanced(cMem);
ZSTD_outBuffer out = { outBuffer, outSize, 0 };
ZSTD_inBuffer in = { inBuffer, inSize, 0 };
CHECK_Z( ZSTD_initCStream(cstream, compressionLevel) );
CHECK_Z( ZSTD_compressStream(cstream, &out, &in) );
CHECK_Z( ZSTD_endStream(cstream, &out) );
ZSTD_freeCStream(cstream);
DISPLAYLEVEL(3, "compressStream level %i : ", compressionLevel);
FUZ_displayMallocStats(malcount);
} }
/* advanced MT API test */
if (part <= 3)
{ U32 nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
ZSTD_outBuffer out = { outBuffer, outSize, 0 };
ZSTD_inBuffer in = { inBuffer, inSize, 0 };
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, (U32)compressionLevel) );
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, nbThreads) );
while ( ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end) ) {}
ZSTD_freeCCtx(cctx);
DISPLAYLEVEL(3, "compress_generic,-T%u,end level %i : ",
nbThreads, compressionLevel);
FUZ_displayMallocStats(malcount);
} } }
/* advanced MT streaming API test */
if (part <= 4)
{ U32 nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
ZSTD_outBuffer out = { outBuffer, outSize, 0 };
ZSTD_inBuffer in = { inBuffer, inSize, 0 };
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, (U32)compressionLevel) );
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, nbThreads) );
CHECK_Z( ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_continue) );
while ( ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end) ) {}
ZSTD_freeCCtx(cctx);
DISPLAYLEVEL(3, "compress_generic,-T%u,continue level %i : ",
nbThreads, compressionLevel);
FUZ_displayMallocStats(malcount);
} } }
return 0;
}
#else
static int FUZ_mallocTests(unsigned seed, double compressibility, unsigned part)
{
(void)seed; (void)compressibility; (void)part;
return 0;
}
#endif
/*=============================================
* Unit tests
=============================================*/
#define CHECK_V(var, fn) size_t const var = fn; if (ZSTD_isError(var)) goto _output_error
@ -108,7 +266,8 @@ static int basicUnitTests(U32 seed, double compressibility)
{
size_t const CNBuffSize = 5 MB;
void* const CNBuffer = malloc(CNBuffSize);
void* const compressedBuffer = malloc(ZSTD_compressBound(CNBuffSize));
size_t const compressedBufferSize = ZSTD_compressBound(CNBuffSize);
void* const compressedBuffer = malloc(compressedBufferSize);
void* const decodedBuffer = malloc(CNBuffSize);
ZSTD_DCtx* dctx = ZSTD_createDCtx();
int testResult = 0;
@ -136,10 +295,20 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : compress %u bytes : ", testNb++, (U32)CNBuffSize);
CHECKPLUS(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(CNBuffSize),
CNBuffer, CNBuffSize, 1),
cSize=r );
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
{ ZSTD_CCtx* cctx = ZSTD_createCCtx();
if (cctx==NULL) goto _output_error;
CHECKPLUS(r, ZSTD_compressCCtx(cctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, 1),
cSize=r );
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(4, "test%3i : size of cctx for level 1 : ", testNb++);
{ size_t const cctxSize = ZSTD_sizeof_CCtx(cctx);
DISPLAYLEVEL(4, "%u bytes \n", (U32)cctxSize);
}
ZSTD_freeCCtx(cctx);
}
DISPLAYLEVEL(4, "test%3i : ZSTD_getFrameContentSize test : ", testNb++);
@ -216,7 +385,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : simple compression test with static CCtx : ", testNb++);
CHECKPLUS(r, ZSTD_compressCCtx(staticCCtx,
compressedBuffer, ZSTD_compressBound(CNBuffSize),
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, STATIC_CCTX_LEVEL),
cSize=r );
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n",
@ -285,7 +454,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : compress %u bytes with 2 threads : ", testNb++, (U32)CNBuffSize);
CHECKPLUS(r, ZSTDMT_compressCCtx(mtctx,
compressedBuffer, ZSTD_compressBound(CNBuffSize),
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
1),
cSize=r );
@ -311,6 +480,23 @@ static int basicUnitTests(U32 seed, double compressibility)
} }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : compress -T2 with checksum : ", testNb++);
{ ZSTD_parameters params = ZSTD_getParams(1, CNBuffSize, 0);
params.fParams.checksumFlag = 1;
params.fParams.contentSizeFlag = 1;
CHECKPLUS(r, ZSTDMT_compress_advanced(mtctx,
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
NULL, params, 3 /*overlapLog*/),
cSize=r );
}
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
DISPLAYLEVEL(4, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize);
{ size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
if (r != CNBuffSize) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
ZSTDMT_freeCCtx(mtctx);
}
@ -372,7 +558,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : compress with flat dictionary : ", testNb++);
cSize = 0;
CHECKPLUS(r, ZSTD_compressEnd(ctxOrig, compressedBuffer, ZSTD_compressBound(CNBuffSize),
CHECKPLUS(r, ZSTD_compressEnd(ctxOrig, compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
cSize += r);
DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
@ -388,7 +574,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : compress with duplicated context : ", testNb++);
{ size_t const cSizeOrig = cSize;
cSize = 0;
CHECKPLUS(r, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, ZSTD_compressBound(CNBuffSize),
CHECKPLUS(r, ZSTD_compressEnd(ctxDuplicated, compressedBuffer, compressedBufferSize,
(const char*)CNBuffer + dictSize, CNBuffSize - dictSize),
cSize += r);
if (cSize != cSizeOrig) goto _output_error; /* should be identical ==> same size */
@ -473,7 +659,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "OK : %u \n", dictID);
DISPLAYLEVEL(4, "test%3i : compress with dictionary : ", testNb++);
cSize = ZSTD_compress_usingDict(cctx, compressedBuffer, ZSTD_compressBound(CNBuffSize),
cSize = ZSTD_compress_usingDict(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
dictBuffer, dictSize, 4);
if (ZSTD_isError(cSize)) goto _output_error;
@ -511,7 +697,7 @@ static int basicUnitTests(U32 seed, double compressibility)
1 /* byReference */, ZSTD_dm_auto,
cParams, ZSTD_defaultCMem);
DISPLAYLEVEL(4, "(size : %u) : ", (U32)ZSTD_sizeof_CDict(cdict));
cSize = ZSTD_compress_usingCDict(cctx, compressedBuffer, ZSTD_compressBound(CNBuffSize),
cSize = ZSTD_compress_usingCDict(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, cdict);
ZSTD_freeCDict(cdict);
if (ZSTD_isError(cSize)) goto _output_error;
@ -546,7 +732,7 @@ static int basicUnitTests(U32 seed, double compressibility)
goto _output_error;
}
cSize = ZSTD_compress_usingCDict(cctx,
compressedBuffer, ZSTD_compressBound(CNBuffSize),
compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, cdict);
if (ZSTD_isError(cSize)) {
DISPLAY("ZSTD_compress_usingCDict failed ");
@ -560,7 +746,7 @@ static int basicUnitTests(U32 seed, double compressibility)
{ ZSTD_frameParameters const fParams = { 0 /* frameSize */, 1 /* checksum */, 1 /* noDictID*/ };
ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBuffSize, dictSize);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictSize, 1 /*byRef*/, ZSTD_dm_auto, cParams, ZSTD_defaultCMem);
cSize = ZSTD_compress_usingCDict_advanced(cctx, compressedBuffer, ZSTD_compressBound(CNBuffSize),
cSize = ZSTD_compress_usingCDict_advanced(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize, cdict, fParams);
ZSTD_freeCDict(cdict);
if (ZSTD_isError(cSize)) goto _output_error;
@ -584,7 +770,7 @@ static int basicUnitTests(U32 seed, double compressibility)
DISPLAYLEVEL(4, "test%3i : ZSTD_compress_advanced, no dictID : ", testNb++);
{ ZSTD_parameters p = ZSTD_getParams(3, CNBuffSize, dictSize);
p.fParams.noDictIDFlag = 1;
cSize = ZSTD_compress_advanced(cctx, compressedBuffer, ZSTD_compressBound(CNBuffSize),
cSize = ZSTD_compress_advanced(cctx, compressedBuffer, compressedBufferSize,
CNBuffer, CNBuffSize,
dictBuffer, dictSize, p);
if (ZSTD_isError(cSize)) goto _output_error;
@ -821,6 +1007,42 @@ static int basicUnitTests(U32 seed, double compressibility)
if (r != _3BYTESTESTLENGTH) goto _output_error; }
DISPLAYLEVEL(4, "OK \n");
DISPLAYLEVEL(4, "test%3i : incompressible data and ill suited dictionary : ", testNb++);
RDG_genBuffer(CNBuffer, CNBuffSize, 0.0, 0.1, seed);
{ /* Train a dictionary on low characters */
size_t dictSize = 16 KB;
void* const dictBuffer = malloc(dictSize);
size_t const totalSampleSize = 1 MB;
size_t const sampleUnitSize = 8 KB;
U32 const nbSamples = (U32)(totalSampleSize / sampleUnitSize);
size_t* const samplesSizes = (size_t*) malloc(nbSamples * sizeof(size_t));
if (!dictBuffer || !samplesSizes) goto _output_error;
{ U32 u; for (u=0; u<nbSamples; u++) samplesSizes[u] = sampleUnitSize; }
dictSize = ZDICT_trainFromBuffer(dictBuffer, dictSize, CNBuffer, samplesSizes, nbSamples);
if (ZDICT_isError(dictSize)) goto _output_error;
{ /* Reverse the characters to make the dictionary ill-suited */
{ U32 u;
for (u = 0; u < CNBuffSize; ++u) {
((BYTE*)CNBuffer)[u] = 255 - ((BYTE*)CNBuffer)[u];
}
}
{ /* Compress the data */
size_t const inputSize = 500;
size_t const outputSize = ZSTD_compressBound(inputSize);
void* const outputBuffer = malloc(outputSize);
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
if (!outputBuffer || !cctx) goto _output_error;
CHECK(ZSTD_compress_usingDict(cctx, outputBuffer, outputSize, CNBuffer, inputSize, dictBuffer, dictSize, 1));
free(outputBuffer);
ZSTD_freeCCtx(cctx);
}
free(dictBuffer);
free(samplesSizes);
}
DISPLAYLEVEL(4, "OK \n");
/* findFrameCompressedSize on skippable frames */
DISPLAYLEVEL(4, "test%3i : frame compressed size of skippable frame : ", testNb++);
{ const char* frame = "\x50\x2a\x4d\x18\x05\x0\x0\0abcde";
@ -892,6 +1114,7 @@ static size_t FUZ_randomLength(U32* seed, U32 maxLog)
goto _output_error; \
} }
#undef CHECK_Z
#define CHECK_Z(f) { \
size_t const err = f; \
if (ZSTD_isError(err)) { \
@ -1223,6 +1446,19 @@ static unsigned readU32FromChar(const char** stringPtr)
return result;
}
/** longCommandWArg() :
* check if *stringPtr starts with longCommand.
* If yes, @return 1 and advance *stringPtr to the position immediately following longCommand.
* Otherwise, @return 0 and leave *stringPtr unmodified.
*/
static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
{
size_t const comSize = strlen(longCommand);
int const result = !strncmp(*stringPtr, longCommand, comSize);
if (result) *stringPtr += comSize;
return result;
}
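/* usage sketch for the helper above, paired with readU32FromChar() exactly
 * as in the option loop below ("--memtest=25" is a hypothetical argument) : */
const char* arg = "--memtest=25";
if (longCommandWArg(&arg, "--memtest=")) {
    unsigned const n = readU32FromChar(&arg);   /* n==25, arg now past the digits */
    (void)n;
}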
int main(int argc, const char** argv)
{
U32 seed = 0;
@ -1235,6 +1471,7 @@ int main(int argc, const char** argv)
U32 mainPause = 0;
U32 maxDuration = 0;
int bigTests = 1;
U32 memTestsOnly = 0;
const char* const programName = argv[0];
/* Check command line */
@ -1245,6 +1482,9 @@ int main(int argc, const char** argv)
/* Handle commands. Aggregated commands are allowed */
if (argument[0]=='-') {
if (longCommandWArg(&argument, "--memtest=")) { memTestsOnly = readU32FromChar(&argument); continue; }
if (!strcmp(argument, "--memtest")) { memTestsOnly=1; continue; }
if (!strcmp(argument, "--no-big-tests")) { bigTests=0; continue; }
argument++;
@ -1316,6 +1556,11 @@ int main(int argc, const char** argv)
DISPLAY("Seed = %u\n", seed);
if (proba!=FUZ_compressibility_default) DISPLAY("Compressibility : %u%%\n", proba);
if (memTestsOnly) {
g_displayLevel = MAX(3, g_displayLevel);
return FUZ_mallocTests(seed, ((double)proba) / 100, memTestsOnly);
}
if (nbTests < testNb) nbTests = testNb;
if (testNb==0)

View File

@ -7,17 +7,17 @@ die() {
roundTripTest() {
if [ -n "$3" ]; then
local_c="$3"
local_p="$2"
cLevel="$3"
proba="$2"
else
local_c="$2"
local_p=""
cLevel="$2"
proba=""
fi
rm -f tmp1 tmp2
$ECHO "roundTripTest: ./datagen $1 $local_p | $ZSTD -v$local_c | $ZSTD -d"
./datagen $1 $local_p | $MD5SUM > tmp1
./datagen $1 $local_p | $ZSTD --ultra -v$local_c | $ZSTD -d | $MD5SUM > tmp2
$ECHO "roundTripTest: ./datagen $1 $proba | $ZSTD -v$cLevel | $ZSTD -d"
./datagen $1 $proba | $MD5SUM > tmp1
./datagen $1 $proba | $ZSTD --ultra -v$cLevel | $ZSTD -d | $MD5SUM > tmp2
$DIFF -q tmp1 tmp2
}
@ -383,6 +383,7 @@ $ZSTD -t --rm tmp1.zst
test -f tmp1.zst # check file is still present
split -b16384 tmp1.zst tmpSplit.
$ZSTD -t tmpSplit.* && die "bad file not detected !"
./datagen | $ZSTD -c | $ZSTD -t
$ECHO "\n**** benchmark mode tests **** "
@ -625,16 +626,15 @@ roundTripTest -g35000000 -P75 10
roundTripTest -g35000000 -P75 11
roundTripTest -g35000000 -P75 12
roundTripTest -g18000000 -P80 13
roundTripTest -g18000000 -P80 14
roundTripTest -g18000000 -P80 15
roundTripTest -g18000000 -P80 16
roundTripTest -g18000000 -P80 17
roundTripTest -g18000013 -P80 13
roundTripTest -g18000014 -P80 14
roundTripTest -g18000015 -P80 15
roundTripTest -g18000016 -P80 16
roundTripTest -g18000017 -P80 17
roundTripTest -g18000018 -P94 18
roundTripTest -g18000019 -P94 19
roundTripTest -g50000000 -P94 18
roundTripTest -g50000000 -P94 19
roundTripTest -g99000000 -P99 20
roundTripTest -g68000020 -P99 20
roundTripTest -g6000000000 -P99 1
fileRoundTripTest -g4193M -P99 1
@ -644,7 +644,8 @@ then
$ECHO "\n**** zstdmt long round-trip tests **** "
roundTripTest -g99000000 -P99 "20 -T2"
roundTripTest -g6000000000 -P99 "1 -T2"
fileRoundTripTest -g4193M -P98 " -T0"
roundTripTest -g1500000000 -P97 "1 -T999"
fileRoundTripTest -g4195M -P98 " -T0"
else
$ECHO "\n**** no multithreading, skipping zstdmt tests **** "
fi

View File

@ -95,19 +95,6 @@ unsigned int FUZ_rand(unsigned int* seedPtr)
return rand32 >> 5;
}
static void* allocFunction(void* opaque, size_t size)
{
void* address = malloc(size);
(void)opaque;
return address;
}
static void freeFunction(void* opaque, void* address)
{
(void)opaque;
free(address);
}
/*======================================================
* Basic Unit tests
@ -1390,13 +1377,12 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
/* multi-segments compression test */
XXH64_reset(&xxhState, 0);
{ ZSTD_outBuffer outBuff = { cBuffer, cBufferSize, 0 } ;
U32 n;
for (n=0, cSize=0, totalTestSize=0 ; totalTestSize < maxTestSize ; n++) {
for (cSize=0, totalTestSize=0 ; (totalTestSize < maxTestSize) ; ) {
/* compress random chunks into randomly sized dst buffers */
size_t const randomSrcSize = FUZ_randomLength(&lseed, maxSampleLog);
size_t const srcSize = MIN(maxTestSize-totalTestSize, randomSrcSize);
size_t const srcStart = FUZ_rand(&lseed) % (srcBufferSize - srcSize);
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog+1);
size_t const dstBuffSize = MIN(cBufferSize - cSize, randomDstSize);
ZSTD_EndDirective const flush = (FUZ_rand(&lseed) & 15) ? ZSTD_e_continue : ZSTD_e_flush;
ZSTD_inBuffer inBuff = { srcBuffer+srcStart, srcSize, 0 };
@ -1415,7 +1401,7 @@ static int fuzzerTests_newAPI(U32 seed, U32 nbTests, unsigned startTest, double
{ size_t remainingToFlush = (size_t)(-1);
while (remainingToFlush) {
ZSTD_inBuffer inBuff = { NULL, 0, 0 };
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog);
size_t const randomDstSize = FUZ_randomLength(&lseed, maxSampleLog+1);
size_t const adjustedDstSize = MIN(cBufferSize - cSize, randomDstSize);
outBuff.size = outBuff.pos + adjustedDstSize;
DISPLAYLEVEL(5, "End-flush into dst buffer of size %u \n", (U32)adjustedDstSize);
@ -1543,7 +1529,6 @@ int main(int argc, const char** argv)
int bigTests = (sizeof(size_t) == 8);
e_api selected_api = simple_api;
const char* const programName = argv[0];
ZSTD_customMem const customMem = { allocFunction, freeFunction, NULL };
ZSTD_customMem const customNULL = ZSTD_defaultCMem;
/* Check command line */
@ -1657,10 +1642,7 @@ int main(int argc, const char** argv)
if (testNb==0) {
result = basicUnitTests(0, ((double)proba) / 100, customNULL); /* constant seed for predictability */
if (!result) {
DISPLAYLEVEL(3, "Unit tests using customMem :\n")
result = basicUnitTests(0, ((double)proba) / 100, customMem); /* use custom memory allocation functions */
} }
}
if (!result) {
switch(selected_api)