/*
 * Copyright (c) 2017-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 */

/// Zstandard educational decoder implementation
/// See https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "zstd_decompress.h"

/******* UTILITY MACROS AND TYPES *********************************************/
// The maximum decompressed size of a block is 128 KB, and literal blocks can't
// be larger than their containing block
#define MAX_LITERALS_SIZE ((size_t)128 * 1024)

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/// This decoder calls exit(1) when it encounters an error; a production
/// library should propagate error codes instead
#define ERROR(s)                                                               \
    do {                                                                       \
        fprintf(stderr, "Error: %s\n", s);                                     \
        exit(1);                                                               \
    } while (0)
#define INP_SIZE()                                                             \
    ERROR("Input buffer smaller than it should be or input is "               \
          "corrupted")
#define OUT_SIZE() ERROR("Output buffer too small for output")
#define CORRUPTION() ERROR("Corruption detected while decompressing")
#define BAD_ALLOC() ERROR("Memory allocation error")
#define IMPOSSIBLE() ERROR("An impossibility has occurred")

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

typedef int8_t  i8;
typedef int16_t i16;
typedef int32_t i32;
typedef int64_t i64;
/******* END UTILITY MACROS AND TYPES *****************************************/

/******* IMPLEMENTATION PRIMITIVE PROTOTYPES **********************************/
/// The implementations for these functions can be found at the bottom of this
/// file. They implement low-level functionality needed for the higher level
/// decompression functions.

/*** IO STREAM OPERATIONS *************/

/// ostream_t/istream_t are used to wrap the pointers/length data passed into
/// ZSTD_decompress, so that all IO operations are safely bounds checked.
/// They are written/read forward, and reads are treated as little-endian.
/// They should be used opaquely to ensure safety.
typedef struct {
    u8 *ptr;
    size_t len;
} ostream_t;

typedef struct {
    const u8 *ptr;
    size_t len;

    // Input often reads a few bits at a time, so maintain an internal offset
    int bit_offset;
} istream_t;

/// The following two functions are the only ones that allow the istream to be
/// non-byte aligned

/// Reads `num` bits from a bitstream, and updates the internal offset
static inline u64 IO_read_bits(istream_t *const in, const int num_bits);
/// Backs up the stream by `num` bits so they can be read again
static inline void IO_rewind_bits(istream_t *const in, const int num_bits);
/// If the remaining bits in a byte will be unused, advance to the end of the
/// byte
static inline void IO_align_stream(istream_t *const in);

/// Write the given byte into the output stream
static inline void IO_write_byte(ostream_t *const out, u8 symb);

/// Returns the number of bytes left to be read in this stream. The stream must
/// be byte aligned.
static inline size_t IO_istream_len(const istream_t *const in);

/// Advances the stream by `len` bytes, and returns a pointer to the chunk that
/// was skipped. The stream must be byte aligned.
static inline const u8 *IO_read_bytes(istream_t *const in, size_t len);
/// Advances the stream by `len` bytes, and returns a pointer to the chunk that
/// was skipped so it can be written to.
static inline u8 *IO_write_bytes(ostream_t *const out, size_t len);

/// Advance the inner state by `len` bytes. The stream must be byte aligned.
static inline void IO_advance_input(istream_t *const in, size_t len);

/// Returns an `ostream_t` constructed from the given pointer and length.
static inline ostream_t IO_make_ostream(u8 *out, size_t len);
/// Returns an `istream_t` constructed from the given pointer and length.
static inline istream_t IO_make_istream(const u8 *in, size_t len);

/// Returns an `istream_t` with the same base as `in`, and length `len`.
/// Then, advance `in` to account for the consumed bytes.
/// `in` must be byte aligned.
static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len);
/*** END IO STREAM OPERATIONS *********/

/*** BITSTREAM OPERATIONS *************/
/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits,
/// and return them interpreted as a little-endian unsigned integer.
static inline u64 read_bits_LE(const u8 *src, const int num_bits,
                               const size_t offset);

/// Read bits from the end of a HUF or FSE bitstream. `offset` is in bits, so
/// it updates `offset` to `offset - bits`, and then reads `bits` bits from
/// `src + offset`. If the offset becomes negative, the extra bits at the
/// bottom are filled in with `0` bits instead of reading from before `src`.
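/// For example, with `*offset == 10` and `bits == 6`, the offset is first
/// reduced to 4 and the six bits at bit positions 4-9 of `src` are returned,
/// which is how the backwards-read FSE and Huffman streams are consumed.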
static inline u64 STREAM_read_bits(const u8 *src, const int bits,
                                   i64 *const offset);
/*** END BITSTREAM OPERATIONS *********/

/*** BIT COUNTING OPERATIONS **********/
/// Returns the index of the highest set bit in `num`, or `-1` if `num == 0`
static inline int highest_set_bit(const u64 num);
/*** END BIT COUNTING OPERATIONS ******/

/*** HUFFMAN PRIMITIVES ***************/
// Table decode method uses exponential memory, so we need to limit depth
#define HUF_MAX_BITS (16)

// Limit the maximum number of symbols to 256 so we can store a symbol in a byte
#define HUF_MAX_SYMBS (256)

/// Structure containing all tables necessary for efficient Huffman decoding
typedef struct {
    u8 *symbols;
    u8 *num_bits;
    int max_bits;
} HUF_dtable;
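// A sketch of how this table is meant to be used (the sizes are what the
// "exponential memory" comment above implies, assuming the usual table-lookup
// decoding scheme): `symbols` and `num_bits` each hold `1 << max_bits`
// entries, so a `max_bits`-bit state read from the stream indexes both arrays
// directly, yielding the decoded symbol and the number of fresh bits to
// consume before the next lookup.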

/// Decode a single symbol and read in enough bits to refresh the state
static inline u8 HUF_decode_symbol(const HUF_dtable *const dtable,
                                   u16 *const state, const u8 *const src,
                                   i64 *const offset);
/// Read in a full state's worth of bits to initialize it
static inline void HUF_init_state(const HUF_dtable *const dtable,
                                  u16 *const state, const u8 *const src,
                                  i64 *const offset);

/// Decompresses a single Huffman stream, returns the number of bytes decoded.
/// `src_len` must be the exact length of the Huffman-coded block.
static size_t HUF_decompress_1stream(const HUF_dtable *const dtable,
                                     ostream_t *const out, istream_t *const in);
/// Same as previous but decodes 4 streams, formatted as in the Zstandard
/// specification.
/// `src_len` must be the exact length of the Huffman-coded block.
static size_t HUF_decompress_4stream(const HUF_dtable *const dtable,
                                     ostream_t *const out, istream_t *const in);

/// Initialize a Huffman decoding table using the table of bit counts provided
static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits,
                            const int num_symbs);
/// Initialize a Huffman decoding table using the table of weights provided
/// Weights follow the definition provided in the Zstandard specification
static void HUF_init_dtable_usingweights(HUF_dtable *const table,
                                         const u8 *const weights,
                                         const int num_symbs);

/// Free the malloc'ed parts of a decoding table
static void HUF_free_dtable(HUF_dtable *const dtable);

/// Deep copy a decoding table, so that it can be used and free'd without
/// impacting the source table.
static void HUF_copy_dtable(HUF_dtable *const dst, const HUF_dtable *const src);
/*** END HUFFMAN PRIMITIVES ***********/

/*** FSE PRIMITIVES *******************/
/// For more description of FSE see
/// https://github.com/Cyan4973/FiniteStateEntropy/

// FSE table decoding uses exponential memory, so limit the maximum accuracy
#define FSE_MAX_ACCURACY_LOG (15)
// Limit the maximum number of symbols so they can be stored in a single byte
#define FSE_MAX_SYMBS (256)

/// The tables needed to decode FSE encoded streams
typedef struct {
    u8 *symbols;
    u8 *num_bits;
    u16 *new_state_base;

    int accuracy_log;
} FSE_dtable;
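// Likewise a sketch of the intended use (again assuming the standard
// table-based FSE decoder): each array holds `1 << accuracy_log` entries
// indexed by the current state; `symbols[state]` is the decoded symbol, and
// the next state is `new_state_base[state]` plus `num_bits[state]` fresh bits
// read from the stream.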

/// Return the symbol for the current state
static inline u8 FSE_peek_symbol(const FSE_dtable *const dtable,
                                 const u16 state);
/// Read the number of bits necessary to update state, update, and shift offset
/// back to reflect the bits read
static inline void FSE_update_state(const FSE_dtable *const dtable,
                                    u16 *const state, const u8 *const src,
                                    i64 *const offset);

/// Combine peek and update: decode a symbol and update the state
static inline u8 FSE_decode_symbol(const FSE_dtable *const dtable,
                                   u16 *const state, const u8 *const src,
                                   i64 *const offset);

/// Read bits from the stream to initialize the state and shift offset back
static inline void FSE_init_state(const FSE_dtable *const dtable,
                                  u16 *const state, const u8 *const src,
                                  i64 *const offset);

/// Decompress two interleaved bitstreams (e.g. compressed Huffman weights)
/// using an FSE decoding table. `src_len` must be the exact length of the
/// block.
static size_t FSE_decompress_interleaved2(const FSE_dtable *const dtable,
                                          ostream_t *const out,
                                          istream_t *const in);

/// Initialize a decoding table using normalized frequencies.
static void FSE_init_dtable(FSE_dtable *const dtable,
                            const i16 *const norm_freqs, const int num_symbs,
                            const int accuracy_log);

/// Decode an FSE header as defined in the Zstandard format specification and
/// use the decoded frequencies to initialize a decoding table.
static void FSE_decode_header(FSE_dtable *const dtable, istream_t *const in,
                              const int max_accuracy_log);

/// Initialize an FSE table that will always return the same symbol and consume
/// 0 bits per symbol, to be used for RLE mode in sequence commands
static void FSE_init_dtable_rle(FSE_dtable *const dtable, const u8 symb);

/// Free the malloc'ed parts of a decoding table
static void FSE_free_dtable(FSE_dtable *const dtable);

/// Deep copy a decoding table, so that it can be used and free'd without
/// impacting the source table.
static void FSE_copy_dtable(FSE_dtable *const dst, const FSE_dtable *const src);
/*** END FSE PRIMITIVES ***************/

/******* END IMPLEMENTATION PRIMITIVE PROTOTYPES ******************************/

/******* ZSTD HELPER STRUCTS AND PROTOTYPES ***********************************/

/// A small structure that can be reused in various places that need to access
/// frame header information
typedef struct {
    // The size of window that we need to be able to contiguously store for
    // references
    size_t window_size;
    // The total output size of this compressed frame
    size_t frame_content_size;

    // The dictionary id if this frame uses one
    u32 dictionary_id;

    // Whether or not the content of this frame has a checksum
    int content_checksum_flag;
    // Whether or not the output for this frame is in a single segment
    int single_segment_flag;
} frame_header_t;

/// The context needed to decode blocks in a frame
typedef struct {
    frame_header_t header;

    // The total amount of data available for backreferences, to determine if
    // an offset is too large to be correct
    size_t current_total_output;

    const u8 *dict_content;
    size_t dict_content_len;

    // Entropy encoding tables so they can be repeated by future blocks instead
    // of retransmitting
    HUF_dtable literals_dtable;
    FSE_dtable ll_dtable;
    FSE_dtable ml_dtable;
    FSE_dtable of_dtable;

    // The last 3 offsets for the special "repeat offsets".
    u64 previous_offsets[3];
} frame_context_t;

/// The decoded contents of a dictionary so that it doesn't have to be repeated
/// for each frame that uses it
struct dictionary_s {
    // Entropy tables
    HUF_dtable literals_dtable;
    FSE_dtable ll_dtable;
    FSE_dtable ml_dtable;
    FSE_dtable of_dtable;

    // Raw content for backreferences
    u8 *content;
    size_t content_size;

    // Offset history to prepopulate the frame's history
    u64 previous_offsets[3];

    u32 dictionary_id;
};

/// A tuple containing the parts necessary to decode and execute a ZSTD sequence
/// command
typedef struct {
    u32 literal_length;
    u32 match_length;
    u32 offset;
} sequence_command_t;

/// The decoder works top-down, starting at the high level like Zstd frames, and
/// working down to lower, more technical levels such as blocks, literals, and
/// sequences. The high-level functions roughly follow the outline of the
/// format specification:
/// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md

/// Before the implementation of each high-level function declared here, the
/// prototypes for their helper functions are defined and explained

/// Decode a single Zstd frame, or error if the input is not a valid frame.
/// Accepts a dict argument, which may be NULL indicating no dictionary.
/// See
/// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame-concatenation
static void decode_frame(ostream_t *const out, istream_t *const in,
                         const dictionary_t *const dict);

// Decode data in a compressed block
static void decompress_block(frame_context_t *const ctx, ostream_t *const out,
                             istream_t *const in);

// Decode the literals section of a block
static size_t decode_literals(frame_context_t *const ctx, istream_t *const in,
                              u8 **const literals);

// Decode the sequences part of a block
static size_t decode_sequences(frame_context_t *const ctx, istream_t *const in,
                               sequence_command_t **const sequences);

// Execute the decoded sequences on the literals block
static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
                              const u8 *const literals,
                              const size_t literals_len,
                              const sequence_command_t *const sequences,
                              const size_t num_sequences);
/******* END ZSTD HELPER STRUCTS AND PROTOTYPES *******************************/

size_t ZSTD_decompress(void *const dst, const size_t dst_len,
                       const void *const src, const size_t src_len) {
    dictionary_t* uninit_dict = create_dictionary();
    size_t const decomp_size = ZSTD_decompress_with_dict(dst, dst_len, src,
                                                         src_len, uninit_dict);
    free_dictionary(uninit_dict);
    return decomp_size;
}
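
/* A minimal caller-side sketch of this entry point (not part of the decoder
 * itself; `MAX_OUTPUT` and the buffers here are hypothetical):
 *
 *     u8 dst[MAX_OUTPUT];
 *     const size_t n = ZSTD_decompress(dst, sizeof(dst), src, src_len);
 *     // `n` is the number of decompressed bytes written to `dst`
 *
 * The caller must supply an output buffer large enough for the whole frame,
 * since this educational decoder exits on error rather than reporting one.
 */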

size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len,
                                 const void *const src, const size_t src_len,
                                 dictionary_t* parsed_dict) {

    istream_t in = IO_make_istream(src, src_len);
    ostream_t out = IO_make_ostream(dst, dst_len);

    // "A content compressed by Zstandard is transformed into a Zstandard frame.
    // Multiple frames can be appended into a single file or stream. A frame is
    // totally independent, has a defined beginning and end, and a set of
    // parameters which tells the decoder how to decompress it."

    /* this decoder assumes decompression of a single frame */
    decode_frame(&out, &in, parsed_dict);

    return out.ptr - (u8 *)dst;
}

/******* FRAME DECODING ******************************************************/

static void decode_data_frame(ostream_t *const out, istream_t *const in,
                              const dictionary_t *const dict);
static void init_frame_context(frame_context_t *const context,
                               istream_t *const in,
                               const dictionary_t *const dict);
static void free_frame_context(frame_context_t *const context);
static void parse_frame_header(frame_header_t *const header,
                               istream_t *const in);
static void frame_context_apply_dict(frame_context_t *const ctx,
                                     const dictionary_t *const dict);

static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
                            istream_t *const in);

static void decode_frame(ostream_t *const out, istream_t *const in,
                         const dictionary_t *const dict) {
    const u32 magic_number = IO_read_bits(in, 32);
    // Zstandard frame
    //
    // "Magic_Number
    //
    // 4 Bytes, little-endian format. Value : 0xFD2FB528"
    if (magic_number == 0xFD2FB528U) {
        // ZSTD frame
        decode_data_frame(out, in, dict);

        return;
    }

    // not a real frame or a skippable frame
    ERROR("Tried to decode non-ZSTD frame");
}

/// Decode a frame that contains compressed data. Not all frames do as there
/// are skippable frames.
/// See
/// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#general-structure-of-zstandard-frame-format
static void decode_data_frame(ostream_t *const out, istream_t *const in,
                              const dictionary_t *const dict) {
    frame_context_t ctx;

    // Initialize the context that needs to be carried from block to block
    init_frame_context(&ctx, in, dict);

    if (ctx.header.frame_content_size != 0 &&
        ctx.header.frame_content_size > out->len) {
        OUT_SIZE();
    }

    decompress_data(&ctx, out, in);

    free_frame_context(&ctx);
}

/// Takes the information provided in the header and dictionary, and initializes
/// the context for this frame
static void init_frame_context(frame_context_t *const context,
                               istream_t *const in,
                               const dictionary_t *const dict) {
    // Most fields in context are correct when initialized to 0
    memset(context, 0, sizeof(frame_context_t));

    // Parse data from the frame header
    parse_frame_header(&context->header, in);

    // Set up the offset history for the repeat offset commands
    context->previous_offsets[0] = 1;
    context->previous_offsets[1] = 4;
    context->previous_offsets[2] = 8;

    // Apply details from the dict if it exists
    frame_context_apply_dict(context, dict);
}

static void free_frame_context(frame_context_t *const context) {
    HUF_free_dtable(&context->literals_dtable);

    FSE_free_dtable(&context->ll_dtable);
    FSE_free_dtable(&context->ml_dtable);
    FSE_free_dtable(&context->of_dtable);

    memset(context, 0, sizeof(frame_context_t));
}

static void parse_frame_header(frame_header_t *const header,
                               istream_t *const in) {
    // "The first header's byte is called the Frame_Header_Descriptor. It tells
    // which other fields are present. Decoding this byte is enough to tell the
    // size of Frame_Header.
    //
    // Bit number   Field name
    // 7-6          Frame_Content_Size_flag
    // 5            Single_Segment_flag
    // 4            Unused_bit
    // 3            Reserved_bit
    // 2            Content_Checksum_flag
    // 1-0          Dictionary_ID_flag"
    const u8 descriptor = IO_read_bits(in, 8);

    // decode frame header descriptor into flags
    const u8 frame_content_size_flag = descriptor >> 6;
    const u8 single_segment_flag = (descriptor >> 5) & 1;
    const u8 reserved_bit = (descriptor >> 3) & 1;
    const u8 content_checksum_flag = (descriptor >> 2) & 1;
    const u8 dictionary_id_flag = descriptor & 3;
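    // For example, a descriptor byte of 0x84 (binary 1000 0100) decodes to
    // Frame_Content_Size_flag = 2 (a 4-byte content size field),
    // Single_Segment_flag = 0, Content_Checksum_flag = 1, and
    // Dictionary_ID_flag = 0 (no dictionary ID field).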

    if (reserved_bit != 0) {
        CORRUPTION();
    }

    header->single_segment_flag = single_segment_flag;
    header->content_checksum_flag = content_checksum_flag;

    // decode window size
    if (!single_segment_flag) {
        // "Provides guarantees on maximum back-reference distance that will be
        // used within compressed data. This information is important for
        // decoders to allocate enough memory.
        //
        // Bit numbers  7-3         2-0
        // Field name   Exponent    Mantissa"
        u8 window_descriptor = IO_read_bits(in, 8);
        u8 exponent = window_descriptor >> 3;
        u8 mantissa = window_descriptor & 7;

        // Use the algorithm from the specification to compute window size
        // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
        size_t window_base = (size_t)1 << (10 + exponent);
        size_t window_add = (window_base / 8) * mantissa;
        header->window_size = window_base + window_add;
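        // Worked example: a window descriptor of 0x2A gives exponent = 5 and
        // mantissa = 2, so window_base = 1 << 15 = 32768,
        // window_add = (32768 / 8) * 2 = 8192, and the window size is
        // 40960 bytes.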
    }

    // decode dictionary id if it exists
    if (dictionary_id_flag) {
        // "This is a variable size field, which contains the ID of the
        // dictionary required to properly decode the frame. Note that this
        // field is optional. When it's not present, it's up to the caller to
        // make sure it uses the correct dictionary. Format is little-endian."
        const int bytes_array[] = {0, 1, 2, 4};
        const int bytes = bytes_array[dictionary_id_flag];

        header->dictionary_id = IO_read_bits(in, bytes * 8);
    } else {
        header->dictionary_id = 0;
    }

    // decode frame content size if it exists
    if (single_segment_flag || frame_content_size_flag) {
        // "This is the original (uncompressed) size. This information is
        // optional. The Field_Size is provided according to value of
        // Frame_Content_Size_flag. The Field_Size can be equal to 0 (not
        // present), 1, 2, 4 or 8 bytes. Format is little-endian."
        //
        // if frame_content_size_flag == 0 but single_segment_flag is set, we
        // still have a 1 byte field
        const int bytes_array[] = {1, 2, 4, 8};
        const int bytes = bytes_array[frame_content_size_flag];

        header->frame_content_size = IO_read_bits(in, bytes * 8);
        if (bytes == 2) {
            // "When Field_Size is 2, the offset of 256 is added."
            header->frame_content_size += 256;
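            // Worked example: a stored 2-byte value of 0x0200 (512) therefore
            // decodes to a frame content size of 768; the 2-byte field covers
            // sizes 256 through 65791.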
        }
    } else {
        header->frame_content_size = 0;
    }

    if (single_segment_flag) {
        // "The Window_Descriptor byte is optional. It is absent when
        // Single_Segment_flag is set. In this case, the maximum back-reference
        // distance is the content size itself, which can be any value from 1 to
        // 2^64-1 bytes (16 EB)."
        header->window_size = header->frame_content_size;
    }
}

/// A dictionary acts as initializing values for the frame context before
/// decompression, so we implement it by applying its predetermined
/// tables and content to the context before beginning decompression
static void frame_context_apply_dict(frame_context_t *const ctx,
                                     const dictionary_t *const dict) {
    // If the content pointer is NULL then it must be an empty dict
    if (!dict || !dict->content)
        return;

    // If the requested dictionary_id is non-zero, the correct dictionary must
    // be present
    if (ctx->header.dictionary_id != 0 &&
        ctx->header.dictionary_id != dict->dictionary_id) {
        ERROR("Wrong dictionary provided");
    }

    // Copy the dict content to the context for references during sequence
    // execution
    ctx->dict_content = dict->content;
    ctx->dict_content_len = dict->content_size;

    // If it's a formatted dict, copy the precomputed tables in so they can
    // be used in the table repeat modes
    if (dict->dictionary_id != 0) {
        // Deep copy the entropy tables so they can be freed independently of
        // the dictionary struct
        HUF_copy_dtable(&ctx->literals_dtable, &dict->literals_dtable);
        FSE_copy_dtable(&ctx->ll_dtable, &dict->ll_dtable);
        FSE_copy_dtable(&ctx->of_dtable, &dict->of_dtable);
        FSE_copy_dtable(&ctx->ml_dtable, &dict->ml_dtable);

        // Copy the repeated offsets
        memcpy(ctx->previous_offsets, dict->previous_offsets,
               sizeof(ctx->previous_offsets));
    }
}

/// Decompress the data from a frame block by block
static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
                            istream_t *const in) {
    // "A frame encapsulates one or multiple blocks. Each block can be
    // compressed or not, and has a guaranteed maximum content size, which
    // depends on frame parameters. Unlike frames, each block depends on
    // previous blocks for proper decoding. However, each block can be
    // decompressed without waiting for its successor, allowing streaming
    // operations."
    int last_block = 0;
    do {
        // "Last_Block
        //
        // The lowest bit signals if this block is the last one. Frame ends
        // right after this block.
        //
        // Block_Type and Block_Size
        //
        // The next 2 bits represent the Block_Type, while the remaining 21 bits
        // represent the Block_Size. Format is little-endian."
        last_block = IO_read_bits(in, 1);
        const int block_type = IO_read_bits(in, 2);
        const size_t block_len = IO_read_bits(in, 21);
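        // For example, a block header whose 3 bytes are 0x25 0x00 0x00 (read
        // little-endian as the 24-bit value 0x000025) decodes to
        // last_block = 1, block_type = 2 (Compressed_Block), and
        // block_len = 0x25 >> 3 = 4.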

        switch (block_type) {
        case 0: {
            // "Raw_Block - this is an uncompressed block. Block_Size is the
            // number of bytes to read and copy."
            const u8 *const read_ptr = IO_read_bytes(in, block_len);
            u8 *const write_ptr = IO_write_bytes(out, block_len);

            // Copy the raw data into the output
            memcpy(write_ptr, read_ptr, block_len);

            ctx->current_total_output += block_len;
            break;
        }
        case 1: {
            // "RLE_Block - this is a single byte, repeated N times. In which
            // case, Block_Size is the size to regenerate, while the
            // "compressed" block is just 1 byte (the byte to repeat)."
            const u8 *const read_ptr = IO_read_bytes(in, 1);
            u8 *const write_ptr = IO_write_bytes(out, block_len);

            // Copy `block_len` copies of `read_ptr[0]` to the output
            memset(write_ptr, read_ptr[0], block_len);

            ctx->current_total_output += block_len;
            break;
        }
        case 2: {
            // "Compressed_Block - this is a Zstandard compressed block,
            // detailed in another section of this specification. Block_Size is
            // the compressed size."

            // Create a sub-stream for the block
            istream_t block_stream = IO_make_sub_istream(in, block_len);
            decompress_block(ctx, out, &block_stream);
            break;
        }
        case 3:
            // "Reserved - this is not a block. This value cannot be used with
            // current version of this specification."
            CORRUPTION();
            break;
        default:
            IMPOSSIBLE();
        }
    } while (!last_block);

    if (ctx->header.content_checksum_flag) {
        // This program does not support checking the checksum, so skip over it
        // if it's present
        IO_advance_input(in, 4);
    }
}
/******* END FRAME DECODING ***************************************************/

/******* BLOCK DECOMPRESSION **************************************************/
static void decompress_block(frame_context_t *const ctx, ostream_t *const out,
                             istream_t *const in) {
    // "A compressed block consists of 2 sections :
    //
    // Literals_Section
    // Sequences_Section"

    // Part 1: decode the literals block
    u8 *literals = NULL;
    const size_t literals_size = decode_literals(ctx, in, &literals);

    // Part 2: decode the sequences block
    sequence_command_t *sequences = NULL;
    const size_t num_sequences =
        decode_sequences(ctx, in, &sequences);

    // Part 3: combine literals and sequence commands to generate output
    execute_sequences(ctx, out, literals, literals_size, sequences,
                      num_sequences);
    free(literals);
    free(sequences);
}
/******* END BLOCK DECOMPRESSION **********************************************/

/******* LITERALS DECODING ****************************************************/
static size_t decode_literals_simple(istream_t *const in, u8 **const literals,
                                     const int block_type,
                                     const int size_format);
static size_t decode_literals_compressed(frame_context_t *const ctx,
                                         istream_t *const in,
                                         u8 **const literals,
                                         const int block_type,
                                         const int size_format);
static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in);
static void fse_decode_hufweights(ostream_t *weights, istream_t *const in,
                                  int *const num_symbs);

static size_t decode_literals(frame_context_t *const ctx, istream_t *const in,
                              u8 **const literals) {
    // "Literals can be stored uncompressed or compressed using Huffman prefix
    // codes. When compressed, an optional tree description can be present,
    // followed by 1 or 4 streams."
    //
    // "Literals_Section_Header
    //
    // Header is in charge of describing how literals are packed. It's a
    // byte-aligned variable-size bitfield, ranging from 1 to 5 bytes, using
    // little-endian convention."
    //
    // "Literals_Block_Type
    //
    // This field uses 2 lowest bits of first byte, describing 4 different block
    // types"
    //
    // size_format takes between 1 and 2 bits
    int block_type = IO_read_bits(in, 2);
    int size_format = IO_read_bits(in, 2);

    if (block_type <= 1) {
        // Raw or RLE literals block
        return decode_literals_simple(in, literals, block_type,
                                      size_format);
    } else {
        // Huffman compressed literals
        return decode_literals_compressed(ctx, in, literals, block_type,
                                          size_format);
    }
}

/// Decodes literals blocks in raw or RLE form
static size_t decode_literals_simple(istream_t *const in, u8 **const literals,
                                     const int block_type,
                                     const int size_format) {
    size_t size;
    switch (size_format) {
    // These cases are in the form ?0
    // In this case, the ? bit is actually part of the size field
    case 0:
    case 2:
        // "Size_Format uses 1 bit. Regenerated_Size uses 5 bits (0-31)."
        IO_rewind_bits(in, 1);
        size = IO_read_bits(in, 5);
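        // For example, if the first header byte is 0x28: block_type =
        // 0x28 & 3 = 0 (Raw_Literals_Block), the single Size_Format bit is 0,
        // and after rewinding one bit, Regenerated_Size = 0x28 >> 3 = 5.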
        break;
    case 1:
        // "Size_Format uses 2 bits. Regenerated_Size uses 12 bits (0-4095)."
        size = IO_read_bits(in, 12);
        break;
    case 3:
        // "Size_Format uses 2 bits. Regenerated_Size uses 20 bits (0-1048575)."
        size = IO_read_bits(in, 20);
        break;
    default:
        // Size format is in range 0-3
        IMPOSSIBLE();
    }

    if (size > MAX_LITERALS_SIZE) {
        CORRUPTION();
    }

    *literals = malloc(size);
    if (!*literals) {
        BAD_ALLOC();
    }

    switch (block_type) {
    case 0: {
        // "Raw_Literals_Block - Literals are stored uncompressed."
        const u8 *const read_ptr = IO_read_bytes(in, size);
        memcpy(*literals, read_ptr, size);
        break;
    }
    case 1: {
        // "RLE_Literals_Block - Literals consist of a single byte value
        // repeated N times."
        const u8 *const read_ptr = IO_read_bytes(in, 1);
        memset(*literals, read_ptr[0], size);
        break;
    }
    default:
        IMPOSSIBLE();
    }

    return size;
}

/// Decodes Huffman compressed literals
static size_t decode_literals_compressed(frame_context_t *const ctx,
                                         istream_t *const in,
                                         u8 **const literals,
                                         const int block_type,
                                         const int size_format) {
    size_t regenerated_size, compressed_size;
    // Only size_format=0 has 1 stream, so default to 4
    int num_streams = 4;
    switch (size_format) {
    case 0:
        // "A single stream. Both Compressed_Size and Regenerated_Size use 10
        // bits (0-1023)."
        num_streams = 1;
        // Fall through as it has the same size format
    case 1:
        // "4 streams. Both Compressed_Size and Regenerated_Size use 10 bits
        // (0-1023)."
        regenerated_size = IO_read_bits(in, 10);
        compressed_size = IO_read_bits(in, 10);
        break;
    case 2:
        // "4 streams. Both Compressed_Size and Regenerated_Size use 14 bits
        // (0-16383)."
        regenerated_size = IO_read_bits(in, 14);
        compressed_size = IO_read_bits(in, 14);
        break;
    case 3:
        // "4 streams. Both Compressed_Size and Regenerated_Size use 18 bits
        // (0-262143)."
        regenerated_size = IO_read_bits(in, 18);
        compressed_size = IO_read_bits(in, 18);
        break;
    default:
        // Impossible
        IMPOSSIBLE();
    }
    if (regenerated_size > MAX_LITERALS_SIZE ||
        compressed_size >= regenerated_size) {
        CORRUPTION();
    }

    *literals = malloc(regenerated_size);
    if (!*literals) {
        BAD_ALLOC();
    }

    ostream_t lit_stream = IO_make_ostream(*literals, regenerated_size);
    istream_t huf_stream = IO_make_sub_istream(in, compressed_size);

    if (block_type == 2) {
        // Decode the provided Huffman table
        // "This section is only present when Literals_Block_Type type is
        // Compressed_Literals_Block (2)."

        HUF_free_dtable(&ctx->literals_dtable);
        decode_huf_table(&ctx->literals_dtable, &huf_stream);
    } else {
        // If the previous Huffman table is being repeated, ensure it exists
        if (!ctx->literals_dtable.symbols) {
            CORRUPTION();
        }
    }

    size_t symbols_decoded;
    if (num_streams == 1) {
        symbols_decoded = HUF_decompress_1stream(&ctx->literals_dtable,
                                                 &lit_stream, &huf_stream);
    } else {
        symbols_decoded = HUF_decompress_4stream(&ctx->literals_dtable,
                                                 &lit_stream, &huf_stream);
    }

    if (symbols_decoded != regenerated_size) {
        CORRUPTION();
    }

    return regenerated_size;
}

// Decode the Huffman table description
static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in) {
    // "All literal values from zero (included) to last present one (excluded)
    // are represented by Weight with values from 0 to Max_Number_of_Bits."

    // "This is a single byte value (0-255), which describes how to decode the
    // list of weights."
    const u8 header = IO_read_bits(in, 8);

    u8 weights[HUF_MAX_SYMBS];
    memset(weights, 0, sizeof(weights));

    int num_symbs;

    if (header >= 128) {
        // "This is a direct representation, where each Weight is written
        // directly as a 4 bits field (0-15). The full representation occupies
        // ((Number_of_Symbols+1)/2) bytes, meaning it uses a last full byte
        // even if Number_of_Symbols is odd. Number_of_Symbols = headerByte -
        // 127"
        num_symbs = header - 127;
        const size_t bytes = (num_symbs + 1) / 2;
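        // For example, a header byte of 131 means Number_of_Symbols =
        // 131 - 127 = 4 weights packed into (4 + 1) / 2 = 2 bytes; a first
        // payload byte of 0x53 then yields Weight[0] = 5 and Weight[1] = 3.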

        const u8 *const weight_src = IO_read_bytes(in, bytes);

        for (int i = 0; i < num_symbs; i++) {
            // "They are encoded forward, 2
            // weights to a byte with the first weight taking the top four bits
            // and the second taking the bottom four (e.g. the following
            // operations could be used to read the weights: Weight[0] =
            // (Byte[0] >> 4), Weight[1] = (Byte[0] & 0xf), etc.)."
            if (i % 2 == 0) {
                weights[i] = weight_src[i / 2] >> 4;
            } else {
                weights[i] = weight_src[i / 2] & 0xf;
            }
        }
    } else {
        // The weights are FSE encoded, decode them before we can construct the
        // table
        istream_t fse_stream = IO_make_sub_istream(in, header);
        ostream_t weight_stream = IO_make_ostream(weights, HUF_MAX_SYMBS);
        fse_decode_hufweights(&weight_stream, &fse_stream, &num_symbs);
    }

    // Construct the table using the decoded weights
    HUF_init_dtable_usingweights(dtable, weights, num_symbs);
}

static void fse_decode_hufweights(ostream_t *weights, istream_t *const in,
                                  int *const num_symbs) {
    const int MAX_ACCURACY_LOG = 7;

    FSE_dtable dtable;

    // "An FSE bitstream starts by a header, describing probabilities
    // distribution. It will create a Decoding Table. For a list of Huffman
    // weights, maximum accuracy is 7 bits."
    FSE_decode_header(&dtable, in, MAX_ACCURACY_LOG);

    // Decode the weights
    *num_symbs = FSE_decompress_interleaved2(&dtable, weights, in);

    FSE_free_dtable(&dtable);
}
/******* END LITERALS DECODING ************************************************/

/******* SEQUENCE DECODING ****************************************************/
/// The combination of FSE states needed to decode sequences
typedef struct {
    FSE_dtable ll_table;
    FSE_dtable of_table;
    FSE_dtable ml_table;

    u16 ll_state;
    u16 of_state;
    u16 ml_state;
} sequence_states_t;

/// Different modes to signal to decode_seq_tables what to do
typedef enum {
    seq_literal_length = 0,
    seq_offset = 1,
    seq_match_length = 2,
} seq_part_t;

typedef enum {
    seq_predefined = 0,
    seq_rle = 1,
    seq_fse = 2,
    seq_repeat = 3,
} seq_mode_t;

/// The predefined FSE distribution tables for `seq_predefined` mode
static const i16 SEQ_LITERAL_LENGTH_DEFAULT_DIST[36] = {
    4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
static const i16 SEQ_OFFSET_DEFAULT_DIST[29] = {
    1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
static const i16 SEQ_MATCH_LENGTH_DEFAULT_DIST[53] = {
    1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};

/// The sequence decoding baseline and number of additional bits to read/add
/// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
static const u32 SEQ_LITERAL_LENGTH_BASELINES[36] = {
    0,  1,  2,   3,   4,   5,    6,    7,    8,    9,     10,    11,
    12, 13, 14,  15,  16,  18,   20,   22,   24,   28,    32,    40,
    48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65538};
static const u8 SEQ_LITERAL_LENGTH_EXTRA_BITS[36] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  1,  1,
    1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

static const u32 SEQ_MATCH_LENGTH_BASELINES[53] = {
    3,  4,   5,   6,   7,    8,    9,    10,   11,    12,    13,    14, 15, 16,
    17, 18,  19,  20,  21,   22,   23,   24,   25,    26,    27,    28, 29, 30,
    31, 32,  33,  34,  35,   37,   39,   41,   43,    47,    51,    59, 67, 83,
    99, 131, 259, 515, 1027, 2051, 4099, 8195, 16387, 32771, 65539};
static const u8 SEQ_MATCH_LENGTH_EXTRA_BITS[53] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  1,  1,  1,  1,
    2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

/// Offset decoding is simpler so we just need a maximum code value
static const u8 SEQ_MAX_CODES[3] = {35, -1, 52};

static void decompress_sequences(frame_context_t *const ctx,
                                 istream_t *const in,
                                 sequence_command_t *const sequences,
                                 const size_t num_sequences);
static sequence_command_t decode_sequence(sequence_states_t *const state,
                                          const u8 *const src,
                                          i64 *const offset);
static void decode_seq_table(FSE_dtable *const table, istream_t *const in,
                             const seq_part_t type, const seq_mode_t mode);

static size_t decode_sequences(frame_context_t *const ctx, istream_t *in,
                               sequence_command_t **const sequences) {
    // "A compressed block is a succession of sequences. A sequence is a
    // literal copy command, followed by a match copy command. A literal copy
    // command specifies a length. It is the number of bytes to be copied (or
    // extracted) from the literal section. A match copy command specifies an
    // offset and a length. The offset gives the position to copy from, which
    // can be within a previous block."

    size_t num_sequences;

    // "Number_of_Sequences
    //
    // This is a variable size field using between 1 and 3 bytes. Let's call its
    // first byte byte0."
    u8 header = IO_read_bits(in, 8);
    if (header == 0) {
        // "There are no sequences. The sequence section stops there.
        // Regenerated content is defined entirely by literals section."
        *sequences = NULL;
        return 0;
    } else if (header < 128) {
        // "Number_of_Sequences = byte0 . Uses 1 byte."
        num_sequences = header;
    } else if (header < 255) {
        // "Number_of_Sequences = ((byte0-128) << 8) + byte1 . Uses 2 bytes."
        num_sequences = ((header - 128) << 8) + IO_read_bits(in, 8);
    } else {
        // "Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00 . Uses 3 bytes."
        num_sequences = IO_read_bits(in, 16) + 0x7F00;
    }
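    // For example, byte0 = 0x85 (133) selects the 2-byte form, so with
    // byte1 = 0x10 (16) the block holds ((133 - 128) << 8) + 16 = 1296
    // sequences.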

    *sequences = malloc(num_sequences * sizeof(sequence_command_t));
    if (!*sequences) {
        BAD_ALLOC();
    }

    decompress_sequences(ctx, in, *sequences, num_sequences);
    return num_sequences;
}

/// Decompress the FSE encoded sequence commands
static void decompress_sequences(frame_context_t *const ctx, istream_t *in,
                                 sequence_command_t *const sequences,
                                 const size_t num_sequences) {
    // "The Sequences_Section regroup all symbols required to decode commands.
    // There are 3 symbol types : literals lengths, offsets and match lengths.
    // They are encoded together, interleaved, in a single bitstream."

    // "Symbol compression modes
    //
    // This is a single byte, defining the compression mode of each symbol
    // type."
    //
    // Bit number : Field name
    // 7-6        : Literals_Lengths_Mode
    // 5-4        : Offsets_Mode
    // 3-2        : Match_Lengths_Mode
    // 1-0        : Reserved
    u8 compression_modes = IO_read_bits(in, 8);
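    // For example, a modes byte of 0x90 (binary 10 01 00 00) selects
    // FSE_Compressed_Mode for literal lengths, RLE_Mode for offsets, and
    // Predefined_Mode for match lengths, with the reserved bits clear.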
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
if ((compression_modes & 3) != 0) {
|
2017-02-04 02:04:00 +00:00
|
|
|
// Reserved bits set
|
2017-01-30 19:42:45 +00:00
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Following the header, up to 3 distribution tables can be described. When
|
|
|
|
// present, they are in this order :
|
|
|
|
//
|
|
|
|
// Literals lengths
|
|
|
|
// Offsets
|
|
|
|
// Match Lengths"
|
2017-02-02 01:05:45 +00:00
|
|
|
// Update the tables we have stored in the context
|
2017-03-23 21:33:38 +00:00
|
|
|
decode_seq_table(&ctx->ll_dtable, in, seq_literal_length,
|
2017-02-02 01:05:45 +00:00
|
|
|
(compression_modes >> 6) & 3);
|
2017-01-31 23:57:18 +00:00
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
decode_seq_table(&ctx->of_dtable, in, seq_offset,
|
2017-02-02 01:05:45 +00:00
|
|
|
(compression_modes >> 4) & 3);
|
2017-01-31 23:57:18 +00:00
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
decode_seq_table(&ctx->ml_dtable, in, seq_match_length,
|
2017-02-02 01:05:45 +00:00
|
|
|
(compression_modes >> 2) & 3);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
sequence_states_t states;
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
// Initialize the decoding tables
|
|
|
|
{
|
|
|
|
states.ll_table = ctx->ll_dtable;
|
|
|
|
states.of_table = ctx->of_dtable;
|
|
|
|
states.ml_table = ctx->ml_dtable;
|
|
|
|
}
|
|
|
|
|
|
|
|
const size_t len = IO_istream_len(in);
|
2017-02-02 01:05:45 +00:00
|
|
|
const u8 *const src = IO_read_bytes(in, len);
|
|
|
|
|
|
|
|
// "After writing the last bit containing information, the compressor writes
|
|
|
|
// a single 1-bit and then fills the byte with 0-7 0 bits of padding."
|
2017-03-22 00:01:51 +00:00
|
|
|
const int padding = 8 - highest_set_bit(src[len - 1]);
|
2017-03-23 21:33:38 +00:00
|
|
|
// The offset starts at the end because FSE streams are read backwards
|
|
|
|
i64 bit_offset = len * 8 - padding;
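// Worked example (illustrative): if the last byte is 0x35 = 0b00110101, its
// highest set bit is bit 5, so padding = 8 - 5 = 3 and decoding starts 3 bits
// below the top of that byte.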
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "The bitstream starts with initial state values, each using the required
|
|
|
|
// number of bits in their respective accuracy, decoded previously from
|
|
|
|
// their normalized distribution.
|
|
|
|
//
|
|
|
|
// It starts by Literals_Length_State, followed by Offset_State, and finally
|
|
|
|
// Match_Length_State."
|
2017-03-23 21:33:38 +00:00
|
|
|
FSE_init_state(&states.ll_table, &states.ll_state, src, &bit_offset);
|
|
|
|
FSE_init_state(&states.of_table, &states.of_state, src, &bit_offset);
|
|
|
|
FSE_init_state(&states.ml_table, &states.ml_state, src, &bit_offset);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
for (size_t i = 0; i < num_sequences; i++) {
|
|
|
|
// Decode sequences one by one
|
2017-03-23 21:33:38 +00:00
|
|
|
sequences[i] = decode_sequence(&states, src, &bit_offset);
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
if (bit_offset != 0) {
|
2017-01-30 19:42:45 +00:00
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Decode a single sequence and update the state
|
2017-03-23 21:33:38 +00:00
|
|
|
static sequence_command_t decode_sequence(sequence_states_t *const states,
|
2017-01-31 23:57:18 +00:00
|
|
|
const u8 *const src,
|
|
|
|
i64 *const offset) {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Each symbol is a code in its own context, which specifies Baseline and
|
|
|
|
// Number_of_Bits to add. Codes are FSE compressed, and interleaved with raw
|
|
|
|
// additional bits in the same bitstream."
|
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
// Decode symbols, but don't update states
|
2017-03-23 21:33:38 +00:00
|
|
|
const u8 of_code = FSE_peek_symbol(&states->of_table, states->of_state);
|
|
|
|
const u8 ll_code = FSE_peek_symbol(&states->ll_table, states->ll_state);
|
|
|
|
const u8 ml_code = FSE_peek_symbol(&states->ml_table, states->ml_state);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
// Offset doesn't need a max value as it's not decoded using a table
|
|
|
|
if (ll_code > SEQ_MAX_CODES[seq_literal_length] ||
|
|
|
|
ml_code > SEQ_MAX_CODES[seq_match_length]) {
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read the interleaved bits
|
|
|
|
sequence_command_t seq;
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Decoding starts by reading the Number_of_Bits required to decode Offset.
|
|
|
|
// It then does the same for Match_Length, and then for Literals_Length."
|
2017-01-30 19:42:45 +00:00
|
|
|
seq.offset = ((u32)1 << of_code) + STREAM_read_bits(src, of_code, offset);
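// Worked example (illustrative): of_code = 5 means 5 extra bits are read; if
// they are 0b01101 = 13, the raw Offset_Value is (1 << 5) + 13 = 45.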
seq.match_length =
|
|
|
|
SEQ_MATCH_LENGTH_BASELINES[ml_code] +
|
|
|
|
STREAM_read_bits(src, SEQ_MATCH_LENGTH_EXTRA_BITS[ml_code], offset);
|
2017-02-04 02:04:00 +00:00
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
seq.literal_length =
|
|
|
|
SEQ_LITERAL_LENGTH_BASELINES[ll_code] +
|
|
|
|
STREAM_read_bits(src, SEQ_LITERAL_LENGTH_EXTRA_BITS[ll_code], offset);
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "If it is not the last sequence in the block, the next operation is to
|
|
|
|
// update states. Using the rules pre-calculated in the decoding tables,
|
|
|
|
// Literals_Length_State is updated, followed by Match_Length_State, and
|
|
|
|
// then Offset_State."
|
2017-01-30 19:42:45 +00:00
|
|
|
// If the stream is complete don't read bits to update state
|
|
|
|
if (*offset != 0) {
|
2017-03-23 21:33:38 +00:00
|
|
|
FSE_update_state(&states->ll_table, &states->ll_state, src, offset);
|
|
|
|
FSE_update_state(&states->ml_table, &states->ml_state, src, offset);
|
|
|
|
FSE_update_state(&states->of_table, &states->of_state, src, offset);
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return seq;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Given a sequence part and table mode, decode the FSE distribution
|
2017-03-23 21:33:38 +00:00
|
|
|
/// Errors if the mode is `seq_repeat` without a pre-existing table in `table`
|
|
|
|
static void decode_seq_table(FSE_dtable *const table, istream_t *const in,
|
|
|
|
const seq_part_t type, const seq_mode_t mode) {
|
2017-01-30 19:42:45 +00:00
|
|
|
// Constant arrays indexed by seq_part_t
|
|
|
|
const i16 *const default_distributions[] = {SEQ_LITERAL_LENGTH_DEFAULT_DIST,
|
|
|
|
SEQ_OFFSET_DEFAULT_DIST,
|
|
|
|
SEQ_MATCH_LENGTH_DEFAULT_DIST};
|
|
|
|
const size_t default_distribution_lengths[] = {36, 29, 53};
|
|
|
|
const size_t default_distribution_accuracies[] = {6, 5, 6};
|
|
|
|
|
|
|
|
const size_t max_accuracies[] = {9, 8, 9};
|
|
|
|
|
|
|
|
if (mode != seq_repeat) {
|
2017-02-02 01:05:45 +00:00
|
|
|
// Free old one before overwriting
|
2017-01-30 19:42:45 +00:00
|
|
|
FSE_free_dtable(table);
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (mode) {
|
|
|
|
case seq_predefined: {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Predefined_Mode : uses a predefined distribution table."
|
2017-01-30 19:42:45 +00:00
|
|
|
const i16 *distribution = default_distributions[type];
|
|
|
|
const size_t symbs = default_distribution_lengths[type];
|
|
|
|
const size_t accuracy_log = default_distribution_accuracies[type];
|
|
|
|
|
|
|
|
FSE_init_dtable(table, distribution, symbs, accuracy_log);
|
2017-02-02 01:05:45 +00:00
|
|
|
break;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
case seq_rle: {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "RLE_Mode : it's a single code, repeated Number_of_Sequences times."
|
2017-02-07 22:44:11 +00:00
|
|
|
const u8 symb = IO_read_bytes(in, 1)[0];
|
2017-01-30 19:42:45 +00:00
|
|
|
FSE_init_dtable_rle(table, symb);
|
2017-02-02 01:05:45 +00:00
|
|
|
break;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
case seq_fse: {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "FSE_Compressed_Mode : standard FSE compression. A distribution table
|
|
|
|
// will be present "
|
2017-02-02 01:05:45 +00:00
|
|
|
FSE_decode_header(table, in, max_accuracies[type]);
|
|
|
|
break;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
case seq_repeat:
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Repeat_Mode : re-use distribution table from previous compressed
|
|
|
|
// block."
|
2017-02-02 01:05:45 +00:00
|
|
|
// Nothing to do here, table will be unchanged
|
2017-03-23 21:33:38 +00:00
|
|
|
if (!table->symbols) {
|
|
|
|
// This mode is invalid if we don't already have a table
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
2017-02-02 01:05:45 +00:00
|
|
|
break;
|
2017-01-30 19:42:45 +00:00
|
|
|
default:
|
|
|
|
// Impossible, as mode is from 0-3
|
2017-02-02 01:05:45 +00:00
|
|
|
IMPOSSIBLE();
|
|
|
|
break;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
2017-03-23 21:33:38 +00:00
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
/******* END SEQUENCE DECODING ************************************************/
|
|
|
|
|
|
|
|
/******* SEQUENCE EXECUTION ***************************************************/
|
2017-02-02 01:05:45 +00:00
|
|
|
static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
|
|
|
|
const u8 *const literals,
|
|
|
|
const size_t literals_len,
|
2017-01-31 23:57:18 +00:00
|
|
|
const sequence_command_t *const sequences,
|
2017-02-02 01:05:45 +00:00
|
|
|
const size_t num_sequences) {
|
|
|
|
istream_t litstream = IO_make_istream(literals, literals_len);
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
u64 *const offset_hist = ctx->previous_offsets;
|
2017-01-30 19:42:45 +00:00
|
|
|
size_t total_output = ctx->current_total_output;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < num_sequences; i++) {
|
2017-01-31 23:57:18 +00:00
|
|
|
const sequence_command_t seq = sequences[i];
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
{
|
2017-03-23 21:33:38 +00:00
|
|
|
// If the sequence asks for more literals than are left, the
|
|
|
|
// sequence must be corrupted
|
2017-02-02 01:05:45 +00:00
|
|
|
if (seq.literal_length > IO_istream_len(&litstream)) {
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
u8 *const write_ptr = IO_write_bytes(out, seq.literal_length);
|
|
|
|
const u8 *const read_ptr =
|
|
|
|
IO_read_bytes(&litstream, seq.literal_length);
|
|
|
|
// Copy literals to output
|
|
|
|
memcpy(write_ptr, read_ptr, seq.literal_length);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
total_output += seq.literal_length;
|
|
|
|
}
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
size_t offset;
|
|
|
|
|
|
|
|
// Offsets are special, we need to handle the repeat offsets
|
|
|
|
if (seq.offset <= 3) {
|
2017-02-02 01:05:45 +00:00
|
|
|
// "The first 3 values define a repeated offset and we will call
|
|
|
|
// them Repeated_Offset1, Repeated_Offset2, and Repeated_Offset3.
|
|
|
|
// They are sorted in recency order, with Repeated_Offset1 meaning
|
|
|
|
// 'most recent one'".
|
|
|
|
|
|
|
|
// Use 0 indexing for the array
|
|
|
|
u32 idx = seq.offset - 1;
|
2017-01-30 19:42:45 +00:00
|
|
|
if (seq.literal_length == 0) {
|
2017-02-02 01:05:45 +00:00
|
|
|
// "There is an exception though, when current sequence's
|
|
|
|
// literals length is 0. In this case, repeated offsets are
|
|
|
|
// shifted by one, so Repeated_Offset1 becomes Repeated_Offset2,
|
|
|
|
// Repeated_Offset2 becomes Repeated_Offset3, and
|
|
|
|
// Repeated_Offset3 becomes Repeated_Offset1 - 1_byte."
|
2017-01-30 19:42:45 +00:00
|
|
|
idx++;
|
|
|
|
}
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
if (idx == 0) {
|
|
|
|
offset = offset_hist[0];
|
2017-01-30 19:42:45 +00:00
|
|
|
} else {
|
2017-02-02 01:05:45 +00:00
|
|
|
// If idx == 3 then literal length was 0 and the offset was 3,
|
|
|
|
// as per the exception listed above
|
|
|
|
offset = idx < 3 ? offset_hist[idx] : offset_hist[0] - 1;
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
// If idx == 1 we don't need to modify offset_hist[2], since
|
|
|
|
// we're using the second-most recent code
|
2017-02-02 01:05:45 +00:00
|
|
|
if (idx > 1) {
|
|
|
|
offset_hist[2] = offset_hist[1];
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
2017-02-02 01:05:45 +00:00
|
|
|
offset_hist[1] = offset_hist[0];
|
|
|
|
offset_hist[0] = offset;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
} else {
|
2017-03-23 21:33:38 +00:00
|
|
|
// When it's not a repeat offset:
|
|
|
|
// "if (Offset_Value > 3) offset = Offset_Value - 3;"
|
2017-01-30 19:42:45 +00:00
|
|
|
offset = seq.offset - 3;
|
|
|
|
|
|
|
|
// Shift back history
|
|
|
|
offset_hist[2] = offset_hist[1];
|
2017-02-02 01:05:45 +00:00
|
|
|
offset_hist[1] = offset_hist[0];
|
|
|
|
offset_hist[0] = offset;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
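// Worked example (illustrative): with history {1, 4, 8}, an Offset_Value of 2
// and a non-zero literal length select Repeated_Offset2 = 4 and reorder the
// history to {4, 1, 8}; an Offset_Value of 7 is a new offset of 7 - 3 = 4.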
|
|
|
|
|
2017-01-31 23:54:02 +00:00
|
|
|
size_t match_length = seq.match_length;
|
2017-02-02 01:05:45 +00:00
|
|
|
|
|
|
|
u8 *write_ptr = IO_write_bytes(out, match_length);
|
2017-01-31 23:54:02 +00:00
|
|
|
if (total_output <= ctx->header.window_size) {
|
|
|
|
// In this case offset might go back into the dictionary
|
|
|
|
if (offset > total_output + ctx->dict_content_len) {
|
|
|
|
// The offset goes beyond even the dictionary
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-01-31 23:54:02 +00:00
|
|
|
if (offset > total_output) {
|
2017-02-02 01:05:45 +00:00
|
|
|
// "The rest of the dictionary is its content. The content act
|
|
|
|
// as a "past" in front of data to compress or decompress, so it
|
|
|
|
// can be referenced in sequence commands."
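// Worked example (illustrative): with total_output = 100, offset = 130 and
// dict_content_len = 200, the first MIN(30, match_length) bytes come from
// dict_content[170..], and any remaining match bytes come from the output.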
|
2017-01-31 23:54:02 +00:00
|
|
|
const size_t dict_copy =
|
|
|
|
MIN(offset - total_output, match_length);
|
|
|
|
const size_t dict_offset =
|
|
|
|
ctx->dict_content_len - (offset - total_output);
|
2017-02-02 01:05:45 +00:00
|
|
|
|
|
|
|
memcpy(write_ptr, ctx->dict_content + dict_offset, dict_copy);
|
|
|
|
write_ptr += dict_copy;
|
2017-01-31 23:54:02 +00:00
|
|
|
match_length -= dict_copy;
|
|
|
|
}
|
2017-02-01 18:41:04 +00:00
|
|
|
} else if (offset > ctx->header.window_size) {
|
|
|
|
CORRUPTION();
|
2017-01-31 23:54:02 +00:00
|
|
|
}
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-01-31 23:54:02 +00:00
|
|
|
// We must copy byte by byte because the match length might be larger
|
|
|
|
// than the offset
|
|
|
|
// ex: if the output so far was "abc", a command with offset=3 and
|
|
|
|
// match_length=6 would produce "abcabcabc" as the new output
|
2017-08-11 21:41:44 +00:00
|
|
|
for (size_t j = 0; j < match_length; j++) {
|
2017-02-02 01:05:45 +00:00
|
|
|
*write_ptr = *(write_ptr - offset);
|
|
|
|
write_ptr++;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
2017-01-31 23:54:02 +00:00
|
|
|
total_output += seq.match_length;
|
|
|
|
}
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
// Copy any leftover literals
|
2017-02-02 01:05:45 +00:00
|
|
|
{
|
|
|
|
size_t len = IO_istream_len(&litstream);
|
|
|
|
u8 *const write_ptr = IO_write_bytes(out, len);
|
|
|
|
const u8 *const read_ptr = IO_read_bytes(&litstream, len);
|
|
|
|
memcpy(write_ptr, read_ptr, len);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
total_output += len;
|
|
|
|
}
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-01-31 23:54:02 +00:00
|
|
|
ctx->current_total_output = total_output;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
/******* END SEQUENCE EXECUTION ***********************************************/
|
|
|
|
|
2017-01-30 22:42:21 +00:00
|
|
|
/******* OUTPUT SIZE COUNTING *************************************************/
|
|
|
|
/// Get the decompressed size of an input stream so memory can be allocated in
|
|
|
|
/// advance.
|
2017-08-11 21:34:49 +00:00
|
|
|
/// This implementation assumes `src` points to a single ZSTD-compressed frame
|
2017-01-31 23:57:18 +00:00
|
|
|
size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) {
|
2017-02-02 01:05:45 +00:00
|
|
|
istream_t in = IO_make_istream(src, src_len);
|
|
|
|
|
2017-08-11 21:34:49 +00:00
|
|
|
// get decompressed size from ZSTD frame header
|
|
|
|
{
|
2017-02-02 01:05:45 +00:00
|
|
|
const u32 magic_number = IO_read_bits(&in, 32);
|
|
|
|
|
2017-08-11 21:34:49 +00:00
|
|
|
if (magic_number == 0xFD2FB528U) {
|
2017-02-02 01:05:45 +00:00
|
|
|
// ZSTD frame
|
|
|
|
frame_header_t header;
|
|
|
|
parse_frame_header(&header, &in);
|
|
|
|
|
|
|
|
if (header.frame_content_size == 0 && !header.single_segment_flag) {
|
|
|
|
// Content size not provided, we can't tell
|
|
|
|
return -1;
|
|
|
|
}
|
2017-01-30 22:42:21 +00:00
|
|
|
|
2017-08-11 21:34:49 +00:00
|
|
|
return header.frame_content_size;
|
2017-02-02 01:05:45 +00:00
|
|
|
} else {
|
2017-08-11 21:34:49 +00:00
|
|
|
// not a real frame or skippable frame
|
|
|
|
ERROR("ZSTD frame magic number did not match");
|
2017-01-30 22:42:21 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
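// Illustrative usage sketch (assumes the ZSTD_decompress() entry point
// declared in zstd_decompress.h; note the function above returns (size_t)-1
// when the frame does not declare its content size):
//
//     const size_t dst_len = ZSTD_get_decompressed_size(src, src_len);
//     if (dst_len == (size_t)-1) { /* unknown size, choose a buffer another way */ }
//     void *const dst = malloc(dst_len);
//     const size_t written = ZSTD_decompress(dst, dst_len, src, src_len);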
|
|
|
|
/******* END OUTPUT SIZE COUNTING *********************************************/
|
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
/******* DICTIONARY PARSING ***************************************************/
|
2017-08-12 01:40:19 +00:00
|
|
|
#define DICT_SIZE_ERROR() ERROR("Dictionary size cannot be less than 8 bytes")
|
|
|
|
|
|
|
|
dictionary_t* create_dictionary() {
|
|
|
|
dictionary_t* dict = calloc(1, sizeof(dictionary_t));
|
|
|
|
if (!dict) {
|
|
|
|
BAD_ALLOC();
|
|
|
|
}
|
|
|
|
return dict;
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
static void init_dictionary_content(dictionary_t *const dict,
|
|
|
|
istream_t *const in);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-08-12 00:53:37 +00:00
|
|
|
void parse_dictionary(dictionary_t *const dict, const void *src,
|
2017-01-30 19:42:45 +00:00
|
|
|
size_t src_len) {
|
2017-08-12 00:53:37 +00:00
|
|
|
const u8 *byte_src = (const u8 *)src;
|
2017-01-30 19:42:45 +00:00
|
|
|
memset(dict, 0, sizeof(dictionary_t));
|
|
|
|
if (src_len < 8) {
|
2017-08-12 01:40:19 +00:00
|
|
|
DICT_SIZE_ERROR();
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
2017-02-02 01:05:45 +00:00
|
|
|
|
2017-08-12 00:53:37 +00:00
|
|
|
istream_t in = IO_make_istream(byte_src, src_len);
|
2017-02-02 01:05:45 +00:00
|
|
|
|
|
|
|
const u32 magic_number = IO_read_bits(&in, 32);
|
2017-01-30 19:42:45 +00:00
|
|
|
if (magic_number != 0xEC30A437) {
|
|
|
|
// raw content dict
|
2017-02-04 02:04:00 +00:00
|
|
|
IO_rewind_bits(&in, 32);
|
|
|
|
init_dictionary_content(dict, &in);
|
2017-01-30 19:42:45 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
dict->dictionary_id = IO_read_bits(&in, 32);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Entropy_Tables : following the same format as the tables in compressed
|
|
|
|
// blocks. They are stored in following order : Huffman tables for literals,
|
|
|
|
// FSE table for offsets, FSE table for match lengths, and FSE table for
|
|
|
|
// literals lengths. It's finally followed by 3 offset values, populating
|
|
|
|
// recent offsets (instead of using {1,4,8}), stored in order, 4-bytes
|
|
|
|
// little-endian each, for a total of 12 bytes. Each recent offset must have
|
|
|
|
// a value < dictionary size."
|
2017-03-23 21:33:38 +00:00
|
|
|
decode_huf_table(&dict->literals_dtable, &in);
|
|
|
|
decode_seq_table(&dict->of_dtable, &in, seq_offset, seq_fse);
|
|
|
|
decode_seq_table(&dict->ml_dtable, &in, seq_match_length, seq_fse);
|
|
|
|
decode_seq_table(&dict->ll_dtable, &in, seq_literal_length, seq_fse);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
// Read in the previous offset history
|
2017-02-02 01:05:45 +00:00
|
|
|
dict->previous_offsets[0] = IO_read_bits(&in, 32);
|
|
|
|
dict->previous_offsets[1] = IO_read_bits(&in, 32);
|
|
|
|
dict->previous_offsets[2] = IO_read_bits(&in, 32);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
// Ensure the provided offsets aren't too large
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Each recent offset must have a value < dictionary size."
|
2017-02-02 01:05:45 +00:00
|
|
|
for (int i = 0; i < 3; i++) {
|
2017-01-30 19:42:45 +00:00
|
|
|
if (dict->previous_offsets[i] > src_len) {
|
|
|
|
ERROR("Dictionary corrupted");
|
|
|
|
}
|
|
|
|
}
|
2017-02-02 01:05:45 +00:00
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Content : The rest of the dictionary is its content. The content act as
|
|
|
|
// a "past" in front of data to compress or decompress, so it can be
|
|
|
|
// referenced in sequence commands."
|
|
|
|
init_dictionary_content(dict, &in);
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
static void init_dictionary_content(dictionary_t *const dict,
|
|
|
|
istream_t *const in) {
|
2017-01-30 19:42:45 +00:00
|
|
|
// Copy in the content
|
2017-02-04 02:04:00 +00:00
|
|
|
dict->content_size = IO_istream_len(in);
|
|
|
|
dict->content = malloc(dict->content_size);
|
2017-01-30 19:42:45 +00:00
|
|
|
if (!dict->content) {
|
|
|
|
BAD_ALLOC();
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
const u8 *const content = IO_read_bytes(in, dict->content_size);
|
|
|
|
|
|
|
|
memcpy(dict->content, content, dict->content_size);
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Free an allocated dictionary
|
2017-08-12 00:53:37 +00:00
|
|
|
void free_dictionary(dictionary_t *const dict) {
|
2017-01-30 19:42:45 +00:00
|
|
|
HUF_free_dtable(&dict->literals_dtable);
|
|
|
|
FSE_free_dtable(&dict->ll_dtable);
|
|
|
|
FSE_free_dtable(&dict->of_dtable);
|
|
|
|
FSE_free_dtable(&dict->ml_dtable);
|
|
|
|
|
|
|
|
free(dict->content);
|
|
|
|
|
|
|
|
memset(dict, 0, sizeof(dictionary_t));
|
|
|
|
}
|
|
|
|
/******* END DICTIONARY PARSING ***********************************************/
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
/******* IO STREAM OPERATIONS *************************************************/
|
|
|
|
#define UNALIGNED() ERROR("Attempting to operate on a non-byte aligned stream")
|
|
|
|
/// Reads `num` bits from a bitstream, and updates the internal offset
|
2017-03-22 00:01:51 +00:00
|
|
|
static inline u64 IO_read_bits(istream_t *const in, const int num_bits) {
|
|
|
|
if (num_bits > 64 || num_bits <= 0) {
|
2017-02-07 22:44:11 +00:00
|
|
|
ERROR("Attempt to read an invalid number of bits");
|
2017-02-02 01:05:45 +00:00
|
|
|
}
|
|
|
|
|
2017-03-22 00:01:51 +00:00
|
|
|
const size_t bytes = (num_bits + in->bit_offset + 7) / 8;
|
|
|
|
const size_t full_bytes = (num_bits + in->bit_offset) / 8;
|
2017-02-02 01:05:45 +00:00
|
|
|
if (bytes > in->len) {
|
|
|
|
INP_SIZE();
|
|
|
|
}
|
|
|
|
|
2017-03-22 00:01:51 +00:00
|
|
|
const u64 result = read_bits_LE(in->ptr, num_bits, in->bit_offset);
|
2017-02-02 01:05:45 +00:00
|
|
|
|
2017-03-22 00:01:51 +00:00
|
|
|
in->bit_offset = (num_bits + in->bit_offset) % 8;
|
2017-02-02 01:05:45 +00:00
|
|
|
in->ptr += full_bytes;
|
|
|
|
in->len -= full_bytes;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Rewind the stream by `num_bits` bits so they can be read again
|
2017-03-22 00:01:51 +00:00
|
|
|
static inline void IO_rewind_bits(istream_t *const in, int num_bits) {
|
|
|
|
if (num_bits < 0) {
|
2017-02-02 01:05:45 +00:00
|
|
|
ERROR("Attempting to rewind stream by a negative number of bits");
|
|
|
|
}
|
|
|
|
|
2017-03-22 00:01:51 +00:00
|
|
|
// move the offset back by `num_bits` bits
|
|
|
|
const int new_offset = in->bit_offset - num_bits;
|
|
|
|
// determine the number of whole bytes we have to rewind, rounding up to an
|
|
|
|
// integer number (e.g. if `new_offset == -5`, `bytes == 1`)
|
2017-03-21 22:03:23 +00:00
|
|
|
const i64 bytes = -(new_offset - 7) / 8;
|
|
|
|
|
|
|
|
in->ptr -= bytes;
|
|
|
|
in->len += bytes;
|
2017-03-22 00:01:51 +00:00
|
|
|
// make sure the resulting `bit_offset` is positive, as mod in C does not
|
|
|
|
// convert numbers from negative to positive (e.g. -22 % 8 == -6)
|
2017-02-02 01:05:45 +00:00
|
|
|
in->bit_offset = ((new_offset % 8) + 8) % 8;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// If the remaining bits in a byte will be unused, advance to the end of the
|
|
|
|
/// byte
|
|
|
|
static inline void IO_align_stream(istream_t *const in) {
|
|
|
|
if (in->bit_offset != 0) {
|
|
|
|
if (in->len == 0) {
|
|
|
|
INP_SIZE();
|
|
|
|
}
|
|
|
|
in->ptr++;
|
|
|
|
in->len--;
|
|
|
|
in->bit_offset = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Write the given byte into the output stream
|
|
|
|
static inline void IO_write_byte(ostream_t *const out, u8 symb) {
|
|
|
|
if (out->len == 0) {
|
|
|
|
OUT_SIZE();
|
|
|
|
}
|
|
|
|
|
|
|
|
out->ptr[0] = symb;
|
|
|
|
out->ptr++;
|
|
|
|
out->len--;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns the number of bytes left to be read in this stream. The stream must
|
|
|
|
/// be byte aligned.
|
|
|
|
static inline size_t IO_istream_len(const istream_t *const in) {
|
|
|
|
return in->len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns a pointer where `len` bytes can be read, and advances the internal
|
|
|
|
/// state. The stream must be byte aligned.
|
|
|
|
static inline const u8 *IO_read_bytes(istream_t *const in, size_t len) {
|
|
|
|
if (len > in->len) {
|
|
|
|
INP_SIZE();
|
|
|
|
}
|
|
|
|
if (in->bit_offset != 0) {
|
|
|
|
UNALIGNED();
|
|
|
|
}
|
|
|
|
const u8 *const ptr = in->ptr;
|
|
|
|
in->ptr += len;
|
|
|
|
in->len -= len;
|
|
|
|
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
/// Returns a pointer to write `len` bytes to, and advances the internal state
|
|
|
|
static inline u8 *IO_write_bytes(ostream_t *const out, size_t len) {
|
|
|
|
if (len > out->len) {
|
2017-02-23 22:34:52 +00:00
|
|
|
OUT_SIZE();
|
2017-02-02 01:05:45 +00:00
|
|
|
}
|
|
|
|
u8 *const ptr = out->ptr;
|
|
|
|
out->ptr += len;
|
|
|
|
out->len -= len;
|
|
|
|
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Advance the inner state by `len` bytes
|
|
|
|
static inline void IO_advance_input(istream_t *const in, size_t len) {
|
|
|
|
if (len > in->len) {
|
|
|
|
INP_SIZE();
|
|
|
|
}
|
|
|
|
if (in->bit_offset != 0) {
|
|
|
|
UNALIGNED();
|
|
|
|
}
|
|
|
|
|
|
|
|
in->ptr += len;
|
|
|
|
in->len -= len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns an `ostream_t` constructed from the given pointer and length
|
|
|
|
static inline ostream_t IO_make_ostream(u8 *out, size_t len) {
|
|
|
|
return (ostream_t) { out, len };
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns an `istream_t` constructed from the given pointer and length
|
|
|
|
static inline istream_t IO_make_istream(const u8 *in, size_t len) {
|
2017-03-23 21:33:38 +00:00
|
|
|
return (istream_t) { in, len, 0 };
|
2017-02-02 01:05:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns an `istream_t` with the same base as `in`, and length `len`
|
|
|
|
/// Then, advance `in` to account for the consumed bytes
|
|
|
|
/// `in` must be byte aligned
|
|
|
|
static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len) {
|
2017-03-23 21:33:38 +00:00
|
|
|
// Consume `len` bytes of the parent stream
|
|
|
|
const u8 *const ptr = IO_read_bytes(in, len);
|
2017-02-02 01:05:45 +00:00
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
// Make a substream using the pointer to those `len` bytes
|
|
|
|
return IO_make_istream(ptr, len);
|
2017-02-02 01:05:45 +00:00
|
|
|
}
|
|
|
|
/******* END IO STREAM OPERATIONS *********************************************/
|
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
/******* BITSTREAM OPERATIONS *************************************************/
|
2017-01-31 23:57:18 +00:00
|
|
|
/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits
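/// Worked example (illustrative): with src = {0xB4, 0x2F}, num_bits = 12 and
/// offset = 3, bits 3..14 of the little-endian stream are returned, i.e.
/// ((0x2FB4 >> 3) & 0xFFF) = 0x5F6.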
|
2017-03-22 00:01:51 +00:00
|
|
|
static inline u64 read_bits_LE(const u8 *src, const int num_bits,
|
2017-01-31 23:57:18 +00:00
|
|
|
const size_t offset) {
|
2017-03-22 00:01:51 +00:00
|
|
|
if (num_bits > 64) {
|
2017-02-07 22:44:11 +00:00
|
|
|
ERROR("Attempt to read an invalid number of bits");
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
// Skip over bytes that aren't in range
|
2017-01-30 19:42:45 +00:00
|
|
|
src += offset / 8;
|
2017-01-31 23:57:18 +00:00
|
|
|
size_t bit_offset = offset % 8;
|
2017-01-30 19:42:45 +00:00
|
|
|
u64 res = 0;
|
|
|
|
|
|
|
|
int shift = 0;
|
2017-03-22 00:01:51 +00:00
|
|
|
int left = num_bits;
|
2017-01-30 19:42:45 +00:00
|
|
|
while (left > 0) {
|
|
|
|
u64 mask = left >= 8 ? 0xff : (((u64)1 << left) - 1);
|
2017-03-23 21:33:38 +00:00
|
|
|
// Read the next byte, shift it to account for the offset, and then mask
|
2017-01-31 23:57:18 +00:00
|
|
|
// out the top part if we don't need all the bits
|
|
|
|
res += (((u64)*src++ >> bit_offset) & mask) << shift;
|
|
|
|
shift += 8 - bit_offset;
|
|
|
|
left -= 8 - bit_offset;
|
|
|
|
bit_offset = 0;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
/// Read bits from the end of a HUF or FSE bitstream. `offset` is in bits, so
|
|
|
|
/// it updates `offset` to `offset - bits`, and then reads `bits` bits from
|
|
|
|
/// `src + offset`. If the offset becomes negative, the extra bits at the
|
|
|
|
/// bottom are filled in with `0` bits instead of reading from before `src`.
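/// Worked example (illustrative): with *offset = 3 and bits = 5, the offset
/// becomes -2, the 3 bits still available are read, and the result is shifted
/// left by 2 so its low 2 bits are zero.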
|
|
|
|
static inline u64 STREAM_read_bits(const u8 *const src, const int bits,
|
|
|
|
i64 *const offset) {
|
2017-01-30 19:42:45 +00:00
|
|
|
*offset = *offset - bits;
|
|
|
|
size_t actual_off = *offset;
|
2017-01-31 23:57:18 +00:00
|
|
|
size_t actual_bits = bits;
|
|
|
|
// Don't actually read bits from before the start of src, so if `*offset <
|
|
|
|
// 0` fix actual_off and actual_bits to reflect the quantity to read
|
2017-01-30 19:42:45 +00:00
|
|
|
if (*offset < 0) {
|
2017-01-31 23:57:18 +00:00
|
|
|
actual_bits += *offset;
|
2017-01-30 19:42:45 +00:00
|
|
|
actual_off = 0;
|
|
|
|
}
|
2017-01-31 23:57:18 +00:00
|
|
|
u64 res = read_bits_LE(src, actual_bits, actual_off);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
if (*offset < 0) {
|
|
|
|
// Fill in the bottom "overflowed" bits with 0's
|
|
|
|
res = -*offset >= 64 ? 0 : (res << -*offset);
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
/******* END BITSTREAM OPERATIONS *********************************************/
|
|
|
|
|
|
|
|
/******* BIT COUNTING OPERATIONS **********************************************/
|
2017-01-31 23:57:18 +00:00
|
|
|
/// Returns `x`, where `2^x` is the largest power of 2 less than or equal to
|
|
|
|
/// `num`, or `-1` if `num == 0`.
|
2017-03-22 00:01:51 +00:00
|
|
|
static inline int highest_set_bit(const u64 num) {
|
2017-01-30 19:42:45 +00:00
|
|
|
for (int i = 63; i >= 0; i--) {
|
|
|
|
if (((u64)1 << i) <= num) {
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
/******* END BIT COUNTING OPERATIONS ******************************************/
|
|
|
|
|
|
|
|
/******* HUFFMAN PRIMITIVES ***************************************************/
|
2017-01-31 23:57:18 +00:00
|
|
|
static inline u8 HUF_decode_symbol(const HUF_dtable *const dtable,
|
|
|
|
u16 *const state, const u8 *const src,
|
|
|
|
i64 *const offset) {
|
2017-01-30 19:42:45 +00:00
|
|
|
// Look up the symbol and number of bits to read
|
|
|
|
const u8 symb = dtable->symbols[*state];
|
|
|
|
const u8 bits = dtable->num_bits[*state];
|
|
|
|
const u16 rest = STREAM_read_bits(src, bits, offset);
|
2017-01-31 23:57:18 +00:00
|
|
|
// Shift `bits` bits out of the state, keeping the low order bits that
|
|
|
|
// weren't necessary to determine this symbol. Then add in the new bits
|
|
|
|
// read from the stream.
|
2017-01-30 19:42:45 +00:00
|
|
|
*state = ((*state << bits) + rest) & (((u16)1 << dtable->max_bits) - 1);
|
|
|
|
|
|
|
|
return symb;
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static inline void HUF_init_state(const HUF_dtable *const dtable,
|
|
|
|
u16 *const state, const u8 *const src,
|
|
|
|
i64 *const offset) {
|
|
|
|
// Read in a full `dtable->max_bits` bits to initialize the state
|
2017-01-30 19:42:45 +00:00
|
|
|
const u8 bits = dtable->max_bits;
|
|
|
|
*state = STREAM_read_bits(src, bits, offset);
|
|
|
|
}
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
static size_t HUF_decompress_1stream(const HUF_dtable *const dtable,
|
|
|
|
ostream_t *const out,
|
|
|
|
istream_t *const in) {
|
|
|
|
const size_t len = IO_istream_len(in);
|
|
|
|
if (len == 0) {
|
|
|
|
INP_SIZE();
|
|
|
|
}
|
|
|
|
const u8 *const src = IO_read_bytes(in, len);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Each bitstream must be read backward, that is starting from the end down
|
|
|
|
// to the beginning. Therefore it's necessary to know the size of each
|
|
|
|
// bitstream.
|
|
|
|
//
|
|
|
|
// It's also necessary to know exactly which bit is the latest. This is
|
|
|
|
// detected by a final bit flag : the highest bit of latest byte is a
|
|
|
|
// final-bit-flag. Consequently, a last byte of 0 is not possible. And the
|
|
|
|
// final-bit-flag itself is not part of the useful bitstream. Hence, the
|
|
|
|
// last byte contains between 0 and 7 useful bits."
|
2017-03-22 00:01:51 +00:00
|
|
|
const int padding = 8 - highest_set_bit(src[len - 1]);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
// Offset starts at the end because HUF streams are read backwards
|
|
|
|
i64 bit_offset = len * 8 - padding;
|
2017-01-30 19:42:45 +00:00
|
|
|
u16 state;
|
|
|
|
|
2017-03-23 21:33:38 +00:00
|
|
|
HUF_init_state(dtable, &state, src, &bit_offset);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
size_t symbols_written = 0;
|
2017-03-23 21:33:38 +00:00
|
|
|
while (bit_offset > -dtable->max_bits) {
|
2017-01-31 23:57:18 +00:00
|
|
|
// Iterate over the stream, decoding one symbol at a time
|
2017-03-23 21:33:38 +00:00
|
|
|
IO_write_byte(out, HUF_decode_symbol(dtable, &state, src, &bit_offset));
|
2017-02-02 01:05:45 +00:00
|
|
|
symbols_written++;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
2017-02-04 02:04:00 +00:00
|
|
|
// "The process continues up to reading the required number of symbols per
|
|
|
|
// stream. If a bitstream is not entirely and exactly consumed, hence
|
|
|
|
// reaching exactly its beginning position with all bits consumed, the
|
|
|
|
// decoding process is considered faulty."
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
// When all symbols have been decoded, the final state value shouldn't have
|
|
|
|
// any data from the stream, so it should have "read" dtable->max_bits from
|
|
|
|
// before the start of `src`
|
|
|
|
// Therefore `offset`, the edge to start reading new bits at, should be
|
|
|
|
// dtable->max_bits before the start of the stream
|
2017-03-23 21:33:38 +00:00
|
|
|
if (bit_offset != -dtable->max_bits) {
|
2017-01-30 19:42:45 +00:00
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
return symbols_written;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
static size_t HUF_decompress_4stream(const HUF_dtable *const dtable,
|
|
|
|
ostream_t *const out, istream_t *const in) {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Compressed size is provided explicitly : in the 4-streams variant,
|
|
|
|
// bitstreams are preceded by 3 unsigned little-endian 16-bits values. Each
|
|
|
|
// value represents the compressed size of one stream, in order. The last
|
|
|
|
// stream size is deducted from total compressed size and from previously
|
|
|
|
// decoded stream sizes"
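// Worked example (illustrative): jump table values 0x0100, 0x00C0 and 0x00E2
// mean streams 1-3 are 256, 192 and 226 bytes long, and stream 4 is the rest
// of the input.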
|
2017-02-02 01:05:45 +00:00
|
|
|
const size_t csize1 = IO_read_bits(in, 16);
|
|
|
|
const size_t csize2 = IO_read_bits(in, 16);
|
|
|
|
const size_t csize3 = IO_read_bits(in, 16);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
istream_t in1 = IO_make_sub_istream(in, csize1);
|
|
|
|
istream_t in2 = IO_make_sub_istream(in, csize2);
|
|
|
|
istream_t in3 = IO_make_sub_istream(in, csize3);
|
|
|
|
istream_t in4 = IO_make_sub_istream(in, IO_istream_len(in));
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
size_t total_output = 0;
|
2017-01-31 23:57:18 +00:00
|
|
|
// Decode each stream independently for simplicity
|
|
|
|
// If we wanted to we could decode all 4 at the same time for speed,
|
|
|
|
// utilizing more execution units
|
2017-02-02 01:05:45 +00:00
|
|
|
total_output += HUF_decompress_1stream(dtable, out, &in1);
|
|
|
|
total_output += HUF_decompress_1stream(dtable, out, &in2);
|
|
|
|
total_output += HUF_decompress_1stream(dtable, out, &in3);
|
|
|
|
total_output += HUF_decompress_1stream(dtable, out, &in4);
|
|
|
|
|
|
|
|
return total_output;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
2017-02-07 22:44:11 +00:00
|
|
|
/// Initializes a Huffman table using canonical Huffman codes
|
|
|
|
/// For more explanation on canonical Huffman codes see
|
|
|
|
/// http://www.cs.uofs.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html
|
|
|
|
/// Codes within a level are allocated in symbol order (i.e. smaller symbols get
|
|
|
|
/// earlier codes)
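/// Worked example (illustrative): bits = {1, 0, 3, 3, 2} gives max_bits = 3
/// and an 8-entry table: symbols 2 and 3 (3 bits each) take entries 0 and 1,
/// symbol 4 (2 bits) takes entries 2-3, and symbol 0 (1 bit) fills entries
/// 4-7; symbol 1 has 0 bits and never appears.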
|
2017-01-31 23:57:18 +00:00
|
|
|
static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits,
|
|
|
|
const int num_symbs) {
|
2017-01-30 19:42:45 +00:00
|
|
|
memset(table, 0, sizeof(HUF_dtable));
|
|
|
|
if (num_symbs > HUF_MAX_SYMBS) {
|
|
|
|
ERROR("Too many symbols for Huffman");
|
|
|
|
}
|
|
|
|
|
|
|
|
u8 max_bits = 0;
|
|
|
|
u16 rank_count[HUF_MAX_BITS + 1];
|
|
|
|
memset(rank_count, 0, sizeof(rank_count));
|
|
|
|
|
|
|
|
// Count the number of symbols for each number of bits, and determine the
|
|
|
|
// depth of the tree
|
|
|
|
for (int i = 0; i < num_symbs; i++) {
|
|
|
|
if (bits[i] > HUF_MAX_BITS) {
|
|
|
|
ERROR("Huffman table depth too large");
|
|
|
|
}
|
|
|
|
max_bits = MAX(max_bits, bits[i]);
|
|
|
|
rank_count[bits[i]]++;
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
const size_t table_size = 1 << max_bits;
|
2017-01-30 19:42:45 +00:00
|
|
|
table->max_bits = max_bits;
|
|
|
|
table->symbols = malloc(table_size);
|
|
|
|
table->num_bits = malloc(table_size);
|
|
|
|
|
|
|
|
if (!table->symbols || !table->num_bits) {
|
|
|
|
free(table->symbols);
|
|
|
|
free(table->num_bits);
|
|
|
|
BAD_ALLOC();
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Symbols are sorted by Weight. Within same Weight, symbols keep natural
|
|
|
|
// order. Symbols with a Weight of zero are removed. Then, starting from
|
|
|
|
// lowest weight, prefix codes are distributed in order."
|
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
u32 rank_idx[HUF_MAX_BITS + 1];
|
|
|
|
// Initialize the starting codes for each rank (number of bits)
|
|
|
|
rank_idx[max_bits] = 0;
|
|
|
|
for (int i = max_bits; i >= 1; i--) {
|
|
|
|
rank_idx[i - 1] = rank_idx[i] + rank_count[i] * (1 << (max_bits - i));
|
|
|
|
// The entire range takes the same number of bits so we can memset it
|
|
|
|
memset(&table->num_bits[rank_idx[i]], i, rank_idx[i - 1] - rank_idx[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rank_idx[0] != table_size) {
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Allocate codes and fill in the table
|
|
|
|
for (int i = 0; i < num_symbs; i++) {
|
|
|
|
if (bits[i] != 0) {
|
|
|
|
// Allocate a code for this symbol and set its range in the table
|
|
|
|
const u16 code = rank_idx[bits[i]];
|
2017-01-31 23:57:18 +00:00
|
|
|
// Since the code doesn't care about the bottom `max_bits - bits[i]`
|
|
|
|
// bits of state, it gets a range that spans all possible values of
|
|
|
|
// the lower bits
|
2017-01-30 19:42:45 +00:00
|
|
|
const u16 len = 1 << (max_bits - bits[i]);
|
|
|
|
memset(&table->symbols[code], i, len);
|
|
|
|
rank_idx[bits[i]] += len;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static void HUF_init_dtable_usingweights(HUF_dtable *const table,
|
|
|
|
const u8 *const weights,
|
|
|
|
const int num_symbs) {
|
2017-01-30 19:42:45 +00:00
|
|
|
// +1 because the last weight is not transmitted in the header
|
|
|
|
if (num_symbs + 1 > HUF_MAX_SYMBS) {
|
|
|
|
ERROR("Too many symbols for Huffman");
|
|
|
|
}
|
|
|
|
|
|
|
|
u8 bits[HUF_MAX_SYMBS];
|
|
|
|
|
|
|
|
u64 weight_sum = 0;
|
|
|
|
for (int i = 0; i < num_symbs; i++) {
|
2017-02-02 01:05:45 +00:00
|
|
|
// Weights are in the same range as bit count
|
|
|
|
if (weights[i] > HUF_MAX_BITS) {
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
2017-01-30 19:42:45 +00:00
|
|
|
weight_sum += weights[i] > 0 ? (u64)1 << (weights[i] - 1) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find the first power of 2 larger than the sum
|
2017-03-22 00:01:51 +00:00
|
|
|
const int max_bits = highest_set_bit(weight_sum) + 1;
|
2017-01-31 23:57:18 +00:00
|
|
|
const u64 left_over = ((u64)1 << max_bits) - weight_sum;
|
2017-01-30 19:42:45 +00:00
|
|
|
// If the left over isn't a power of 2, the weights are invalid
|
|
|
|
if (left_over & (left_over - 1)) {
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
// left_over is used to find the last weight, as it's not transmitted:
// by inverting 2^(weight - 1) we can determine the value of last_weight
|
2017-03-22 00:01:51 +00:00
|
|
|
const int last_weight = highest_set_bit(left_over) + 1;
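// Worked example (illustrative): transmitted weights {2, 1, 1} sum to
// 2^1 + 2^0 + 2^0 = 4, so max_bits = 3, left_over = 8 - 4 = 4 and
// last_weight = 3; the bit lengths become {2, 3, 3} plus 1 bit for the
// implied final symbol.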
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
for (int i = 0; i < num_symbs; i++) {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Number_of_Bits = Number_of_Bits ? Max_Number_of_Bits + 1 - Weight : 0"
|
2017-01-30 19:42:45 +00:00
|
|
|
bits[i] = weights[i] > 0 ? (max_bits + 1 - weights[i]) : 0;
|
|
|
|
}
|
|
|
|
bits[num_symbs] =
|
2017-01-31 23:57:18 +00:00
|
|
|
max_bits + 1 - last_weight; // Last weight is always non-zero
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
HUF_init_dtable(table, bits, num_symbs + 1);
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static void HUF_free_dtable(HUF_dtable *const dtable) {
|
2017-01-30 19:42:45 +00:00
|
|
|
free(dtable->symbols);
|
|
|
|
free(dtable->num_bits);
|
|
|
|
memset(dtable, 0, sizeof(HUF_dtable));
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static void HUF_copy_dtable(HUF_dtable *const dst,
|
|
|
|
const HUF_dtable *const src) {
|
2017-01-30 19:42:45 +00:00
|
|
|
if (src->max_bits == 0) {
|
|
|
|
memset(dst, 0, sizeof(HUF_dtable));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
const size_t size = (size_t)1 << src->max_bits;
|
2017-01-30 19:42:45 +00:00
|
|
|
dst->max_bits = src->max_bits;
|
|
|
|
|
|
|
|
dst->symbols = malloc(size);
|
|
|
|
dst->num_bits = malloc(size);
|
|
|
|
if (!dst->symbols || !dst->num_bits) {
|
|
|
|
BAD_ALLOC();
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(dst->symbols, src->symbols, size);
|
|
|
|
memcpy(dst->num_bits, src->num_bits, size);
|
|
|
|
}
|
|
|
|
/******* END HUFFMAN PRIMITIVES ***********************************************/
|
|
|
|
|
|
|
|
/******* FSE PRIMITIVES *******************************************************/
|
2017-02-07 22:44:11 +00:00
|
|
|
/// For more description of FSE see
|
|
|
|
/// https://github.com/Cyan4973/FiniteStateEntropy/
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
/// Allow a symbol to be decoded without updating state
|
|
|
|
static inline u8 FSE_peek_symbol(const FSE_dtable *const dtable,
|
|
|
|
const u16 state) {
|
2017-01-30 19:42:45 +00:00
|
|
|
return dtable->symbols[state];
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
/// Consumes bits from the input and uses the current state to determine the
|
|
|
|
/// next state
|
|
|
|
static inline void FSE_update_state(const FSE_dtable *const dtable,
|
|
|
|
u16 *const state, const u8 *const src,
|
|
|
|
i64 *const offset) {
|
2017-01-30 19:42:45 +00:00
|
|
|
const u8 bits = dtable->num_bits[*state];
|
|
|
|
const u16 rest = STREAM_read_bits(src, bits, offset);
|
|
|
|
*state = dtable->new_state_base[*state] + rest;
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
/// Decodes a single FSE symbol and updates the offset
|
|
|
|
static inline u8 FSE_decode_symbol(const FSE_dtable *const dtable,
|
|
|
|
u16 *const state, const u8 *const src,
|
|
|
|
i64 *const offset) {
|
2017-01-30 19:42:45 +00:00
|
|
|
const u8 symb = FSE_peek_symbol(dtable, *state);
|
|
|
|
FSE_update_state(dtable, state, src, offset);
|
|
|
|
return symb;
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static inline void FSE_init_state(const FSE_dtable *const dtable,
|
|
|
|
u16 *const state, const u8 *const src,
|
|
|
|
i64 *const offset) {
|
|
|
|
// Read in a full `accuracy_log` bits to initialize the state
|
2017-01-30 19:42:45 +00:00
|
|
|
const u8 bits = dtable->accuracy_log;
|
|
|
|
*state = STREAM_read_bits(src, bits, offset);
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static size_t FSE_decompress_interleaved2(const FSE_dtable *const dtable,
|
2017-02-02 01:05:45 +00:00
|
|
|
ostream_t *const out,
|
|
|
|
istream_t *const in) {
|
|
|
|
const size_t len = IO_istream_len(in);
|
|
|
|
if (len == 0) {
|
2017-01-30 19:42:45 +00:00
|
|
|
INP_SIZE();
|
|
|
|
}
|
2017-02-02 01:05:45 +00:00
|
|
|
const u8 *const src = IO_read_bytes(in, len);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Each bitstream must be read backward, that is starting from the end down
|
|
|
|
// to the beginning. Therefore it's necessary to know the size of each
|
|
|
|
// bitstream.
|
|
|
|
//
|
|
|
|
// It's also necessary to know exactly which bit is the latest. This is
|
|
|
|
// detected by a final bit flag : the highest bit of latest byte is a
|
|
|
|
// final-bit-flag. Consequently, a last byte of 0 is not possible. And the
|
|
|
|
// final-bit-flag itself is not part of the useful bitstream. Hence, the
|
|
|
|
// last byte contains between 0 and 7 useful bits."
|
2017-03-22 00:01:51 +00:00
|
|
|
const int padding = 8 - highest_set_bit(src[len - 1]);
|
2017-02-02 01:05:45 +00:00
|
|
|
i64 offset = len * 8 - padding;
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
u16 state1, state2;
|
2017-02-04 02:04:00 +00:00
|
|
|
// "The first state (State1) encodes the even indexed symbols, and the
|
|
|
|
// second (State2) encodes the odd indexes. State1 is initialized first, and
|
|
|
|
// then State2, and they take turns decoding a single symbol and updating
|
|
|
|
// their state."
|
2017-01-30 19:42:45 +00:00
|
|
|
FSE_init_state(dtable, &state1, src, &offset);
|
|
|
|
FSE_init_state(dtable, &state2, src, &offset);
|
|
|
|
|
|
|
|
// Decode until we overflow the stream
|
|
|
|
// Since we decode in reverse order, overflowing the stream is offset going
|
|
|
|
// negative
|
2017-02-02 01:05:45 +00:00
|
|
|
size_t symbols_written = 0;
|
2017-01-30 19:42:45 +00:00
|
|
|
while (1) {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "The number of symbols to decode is determined by tracking bitStream
|
|
|
|
// overflow condition: If updating state after decoding a symbol would
|
|
|
|
// require more bits than remain in the stream, it is assumed the extra
|
|
|
|
// bits are 0. Then, the symbols for each of the final states are
|
|
|
|
// decoded and the process is complete."
|
2017-02-02 01:05:45 +00:00
|
|
|
IO_write_byte(out, FSE_decode_symbol(dtable, &state1, src, &offset));
|
|
|
|
symbols_written++;
|
2017-01-30 19:42:45 +00:00
|
|
|
if (offset < 0) {
|
|
|
|
// There's still a symbol to decode in state2
|
2017-02-02 01:05:45 +00:00
|
|
|
IO_write_byte(out, FSE_peek_symbol(dtable, state2));
|
|
|
|
symbols_written++;
|
2017-01-30 19:42:45 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
IO_write_byte(out, FSE_decode_symbol(dtable, &state2, src, &offset));
|
|
|
|
symbols_written++;
|
2017-01-30 19:42:45 +00:00
|
|
|
if (offset < 0) {
|
|
|
|
// There's still a symbol to decode in state1
|
2017-02-02 01:05:45 +00:00
|
|
|
IO_write_byte(out, FSE_peek_symbol(dtable, state1));
|
|
|
|
symbols_written++;
|
2017-01-30 19:42:45 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
return symbols_written;
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static void FSE_init_dtable(FSE_dtable *const dtable,
|
|
|
|
const i16 *const norm_freqs, const int num_symbs,
|
|
|
|
const int accuracy_log) {
|
2017-01-30 19:42:45 +00:00
|
|
|
if (accuracy_log > FSE_MAX_ACCURACY_LOG) {
|
|
|
|
ERROR("FSE accuracy too large");
|
|
|
|
}
|
|
|
|
if (num_symbs > FSE_MAX_SYMBS) {
|
|
|
|
ERROR("Too many symbols for FSE");
|
|
|
|
}
|
|
|
|
|
|
|
|
dtable->accuracy_log = accuracy_log;
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
const size_t size = (size_t)1 << accuracy_log;
|
2017-01-30 19:42:45 +00:00
|
|
|
dtable->symbols = malloc(size * sizeof(u8));
|
|
|
|
dtable->num_bits = malloc(size * sizeof(u8));
|
|
|
|
dtable->new_state_base = malloc(size * sizeof(u16));
|
|
|
|
|
2017-01-30 22:57:02 +00:00
|
|
|
if (!dtable->symbols || !dtable->num_bits || !dtable->new_state_base) {
|
|
|
|
BAD_ALLOC();
|
|
|
|
}
|
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
// Used to determine how many bits need to be read for each state,
|
|
|
|
// and where the destination range should start
|
|
|
|
// Needs to be u16 because max value is 2 * max number of symbols,
|
|
|
|
// which can be larger than a byte can store
|
|
|
|
u16 state_desc[FSE_MAX_SYMBS];
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Symbols are scanned in their natural order for "less than 1"
|
|
|
|
// probabilities. Symbols with this probability are being attributed a
|
|
|
|
// single cell, starting from the end of the table. These symbols define a
|
|
|
|
// full state reset, reading Accuracy_Log bits."
|
2017-01-30 19:42:45 +00:00
|
|
|
int high_threshold = size;
|
|
|
|
for (int s = 0; s < num_symbs; s++) {
|
|
|
|
// Scan for low probability symbols to put at the top
|
|
|
|
if (norm_freqs[s] == -1) {
|
|
|
|
dtable->symbols[--high_threshold] = s;
|
|
|
|
state_desc[s] = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "All remaining symbols are sorted in their natural order. Starting from
|
|
|
|
// symbol 0 and table position 0, each symbol gets attributed as many cells
|
|
|
|
// as its probability. Cell allocation is spreaded, not linear."
|
2017-01-30 19:42:45 +00:00
|
|
|
// Place the rest in the table
|
2017-01-31 23:57:18 +00:00
|
|
|
const u16 step = (size >> 1) + (size >> 3) + 3;
|
|
|
|
const u16 mask = size - 1;
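// Worked example (illustrative): with accuracy_log = 4, size = 16 and
// step = 8 + 2 + 3 = 13; starting at 0 the walk visits 0, 13, 10, 7, 4, 1, ...
// and touches every cell exactly once because 13 is coprime to 16.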
|
2017-01-30 19:42:45 +00:00
|
|
|
u16 pos = 0;
|
|
|
|
for (int s = 0; s < num_symbs; s++) {
|
|
|
|
if (norm_freqs[s] <= 0) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
state_desc[s] = norm_freqs[s];
|
|
|
|
|
|
|
|
for (int i = 0; i < norm_freqs[s]; i++) {
|
2017-01-31 23:57:18 +00:00
|
|
|
// Give `norm_freqs[s]` states to symbol s
|
2017-01-30 19:42:45 +00:00
|
|
|
dtable->symbols[pos] = s;
|
2017-02-04 02:04:00 +00:00
|
|
|
// "A position is skipped if already occupied, typically by a "less
|
|
|
|
// than 1" probability symbol."
|
2017-01-30 19:42:45 +00:00
|
|
|
do {
|
|
|
|
pos = (pos + step) & mask;
|
|
|
|
} while (pos >=
|
2017-02-04 02:04:00 +00:00
|
|
|
high_threshold);
|
2017-01-30 19:42:45 +00:00
|
|
|
// Note: no other collision checking is necessary as `step` is
|
2017-01-30 22:42:21 +00:00
|
|
|
// coprime to `size`, so the cycle will visit each position exactly
|
|
|
|
// once
|
2017-01-30 19:42:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (pos != 0) {
|
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now we can fill baseline and num bits
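// Worked example (illustrative): a symbol with normalized frequency 3 in a
// size-16 table sees next_state_desc values 3, 4 and 5 for its three cells,
// giving (num_bits, new_state_base) of (3, 8), (2, 0) and (2, 4), which
// together cover all 16 destination states.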
|
2017-08-11 21:41:44 +00:00
|
|
|
for (size_t i = 0; i < size; i++) {
|
2017-01-30 19:42:45 +00:00
|
|
|
u8 symbol = dtable->symbols[i];
|
|
|
|
u16 next_state_desc = state_desc[symbol]++;
|
2017-01-31 23:57:18 +00:00
|
|
|
// Fills in the table appropriately: next_state_desc increases each time a
// cell for this symbol is seen, so the number of bits to read decreases
|
2017-03-22 00:01:51 +00:00
|
|
|
dtable->num_bits[i] = (u8)(accuracy_log - highest_set_bit(next_state_desc));
|
2017-01-31 23:57:18 +00:00
|
|
|
// Baseline increases until the bit threshold is passed, at which point
|
2017-01-30 22:42:21 +00:00
|
|
|
// it resets to 0
|
2017-01-30 19:42:45 +00:00
|
|
|
dtable->new_state_base[i] =
|
|
|
|
((u16)next_state_desc << dtable->num_bits[i]) - size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
/// Decode an FSE header as defined in the Zstandard format specification and
|
|
|
|
/// use the decoded frequencies to initialize a decoding table.
|
2017-02-02 01:05:45 +00:00
|
|
|
static void FSE_decode_header(FSE_dtable *const dtable, istream_t *const in,
|
2017-01-31 23:57:18 +00:00
|
|
|
const int max_accuracy_log) {
|
2017-02-04 02:04:00 +00:00
|
|
|
// "An FSE distribution table describes the probabilities of all symbols
|
|
|
|
// from 0 to the last present one (included) on a normalized scale of 1 <<
|
|
|
|
// Accuracy_Log .
|
|
|
|
//
|
|
|
|
// It's a bitstream which is read forward, in little-endian fashion. It's
|
|
|
|
// not necessary to know its exact size, since it will be discovered and
|
|
|
|
// reported by the decoding process.
|
2017-01-30 19:42:45 +00:00
|
|
|
if (max_accuracy_log > FSE_MAX_ACCURACY_LOG) {
|
|
|
|
ERROR("FSE accuracy too large");
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// The bitstream starts by reporting on which scale it operates.
|
|
|
|
// Accuracy_Log = low4bits + 5. Note that maximum Accuracy_Log for literal
|
|
|
|
// and match lengths is 9, and for offsets is 8. Higher values are
|
|
|
|
// considered errors."
|
2017-02-02 01:05:45 +00:00
|
|
|
const int accuracy_log = 5 + IO_read_bits(in, 4);
|
2017-01-30 19:42:45 +00:00
|
|
|
if (accuracy_log > max_accuracy_log) {
|
|
|
|
ERROR("FSE accuracy too large");
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Then follows each symbol value, from 0 to last present one. The number
|
|
|
|
// of bits used by each field is variable. It depends on :
|
|
|
|
//
|
|
|
|
// Remaining probabilities + 1 : example : Presuming an Accuracy_Log of 8,
|
|
|
|
// and presuming 100 probabilities points have already been distributed, the
|
|
|
|
// decoder may read any value from 0 to 255 - 100 + 1 == 156 (inclusive).
|
|
|
|
// Therefore, it must read log2sup(156) == 8 bits.
|
|
|
|
//
|
|
|
|
// Value decoded : small values use 1 less bit : example : Presuming values
|
|
|
|
// from 0 to 156 (inclusive) are possible, 255-156 = 99 values are remaining
|
|
|
|
// in an 8-bits field. They are used this way : first 99 values (hence from
|
|
|
|
// 0 to 98) use only 7 bits, values from 99 to 156 use 8 bits. "
|
|
|
|
|
|
|
|
i32 remaining = 1 << accuracy_log;
|
2017-01-30 19:42:45 +00:00
|
|
|
i16 frequencies[FSE_MAX_SYMBS];
|
|
|
|
|
|
|
|
int symb = 0;
|
2017-02-04 02:04:00 +00:00
|
|
|
while (remaining > 0 && symb < FSE_MAX_SYMBS) {
|
2017-01-31 23:57:18 +00:00
|
|
|
// Log of the number of possible values we could read
|
2017-03-22 00:01:51 +00:00
|
|
|
int bits = highest_set_bit(remaining + 1) + 1;
|
2017-01-31 23:57:18 +00:00
|
|
|
|
2017-02-02 01:05:45 +00:00
|
|
|
u16 val = IO_read_bits(in, bits);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
// Try to mask out the lower bits to see if it qualifies for the "small
|
2017-01-30 19:42:45 +00:00
|
|
|
// value" threshold
|
2017-01-31 23:57:18 +00:00
|
|
|
const u16 lower_mask = ((u16)1 << (bits - 1)) - 1;
|
2017-02-04 02:04:00 +00:00
|
|
|
const u16 threshold = ((u16)1 << bits) - 1 - (remaining + 1);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
if ((val & lower_mask) < threshold) {
|
2017-02-02 01:05:45 +00:00
|
|
|
IO_rewind_bits(in, 1);
|
2017-01-30 19:42:45 +00:00
|
|
|
val = val & lower_mask;
|
|
|
|
} else if (val > lower_mask) {
|
|
|
|
val = val - threshold;
|
|
|
|
}
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "Probability is obtained from Value decoded by following formula :
|
|
|
|
// Proba = value - 1"
|
2017-01-31 23:57:18 +00:00
|
|
|
const i16 proba = (i16)val - 1;
|
2017-02-04 02:04:00 +00:00
|
|
|
|
|
|
|
// "It means value 0 becomes negative probability -1. -1 is a special
|
|
|
|
// probability, which means "less than 1". Its effect on distribution
|
|
|
|
// table is described in next paragraph. For the purpose of calculating
|
|
|
|
// cumulated distribution, it counts as one."
|
2017-01-30 19:42:45 +00:00
|
|
|
remaining -= proba < 0 ? -proba : proba;
|
|
|
|
|
|
|
|
frequencies[symb] = proba;
|
|
|
|
symb++;
|
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "When a symbol has a probability of zero, it is followed by a 2-bits
|
|
|
|
// repeat flag. This repeat flag tells how many probabilities of zeroes
|
|
|
|
// follow the current one. It provides a number ranging from 0 to 3. If
|
|
|
|
// it is a 3, another 2-bits repeat flag follows, and so on."
|
2017-01-30 19:42:45 +00:00
|
|
|
if (proba == 0) {
|
2017-01-31 23:57:18 +00:00
|
|
|
// Read the next two bits to see how many more 0s
|
2017-02-02 01:05:45 +00:00
|
|
|
int repeat = IO_read_bits(in, 2);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
|
|
|
while (1) {
|
|
|
|
for (int i = 0; i < repeat && symb < FSE_MAX_SYMBS; i++) {
|
|
|
|
frequencies[symb++] = 0;
|
|
|
|
}
|
|
|
|
if (repeat == 3) {
|
2017-02-02 01:05:45 +00:00
|
|
|
repeat = IO_read_bits(in, 2);
|
2017-01-30 19:42:45 +00:00
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-02-02 01:05:45 +00:00
|
|
|
IO_align_stream(in);
|
2017-01-30 19:42:45 +00:00
|
|
|
|
2017-02-04 02:04:00 +00:00
|
|
|
// "When last symbol reaches cumulated total of 1 << Accuracy_Log, decoding
|
|
|
|
// is complete. If the last symbol makes cumulated total go above 1 <<
|
|
|
|
// Accuracy_Log, distribution is considered corrupted."
|
|
|
|
if (remaining != 0 || symb >= FSE_MAX_SYMBS) {
|
2017-01-30 19:42:45 +00:00
|
|
|
CORRUPTION();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Initialize the decoding table using the determined weights
|
|
|
|
FSE_init_dtable(dtable, frequencies, symb, accuracy_log);
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static void FSE_init_dtable_rle(FSE_dtable *const dtable, const u8 symb) {
|
2017-01-30 19:42:45 +00:00
|
|
|
dtable->symbols = malloc(sizeof(u8));
|
|
|
|
dtable->num_bits = malloc(sizeof(u8));
|
|
|
|
dtable->new_state_base = malloc(sizeof(u16));
|
|
|
|
|
2017-01-30 22:57:02 +00:00
|
|
|
if (!dtable->symbols || !dtable->num_bits || !dtable->new_state_base) {
|
|
|
|
BAD_ALLOC();
|
|
|
|
}
|
|
|
|
|
2017-01-30 19:42:45 +00:00
|
|
|
// This setup will always have a state of 0, always return symbol `symb`,
|
2017-01-30 22:42:21 +00:00
|
|
|
// and never consume any bits
|
2017-01-30 19:42:45 +00:00
|
|
|
dtable->symbols[0] = symb;
|
|
|
|
dtable->num_bits[0] = 0;
|
|
|
|
dtable->new_state_base[0] = 0;
|
|
|
|
dtable->accuracy_log = 0;
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static void FSE_free_dtable(FSE_dtable *const dtable) {
|
2017-01-30 19:42:45 +00:00
|
|
|
free(dtable->symbols);
|
|
|
|
free(dtable->num_bits);
|
|
|
|
free(dtable->new_state_base);
|
|
|
|
memset(dtable, 0, sizeof(FSE_dtable));
|
|
|
|
}
|
|
|
|
|
2017-01-31 23:57:18 +00:00
|
|
|
static void FSE_copy_dtable(FSE_dtable *const dst, const FSE_dtable *const src) {
|
2017-01-30 19:42:45 +00:00
|
|
|
if (src->accuracy_log == 0) {
|
|
|
|
memset(dst, 0, sizeof(FSE_dtable));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t size = (size_t)1 << src->accuracy_log;
|
|
|
|
dst->accuracy_log = src->accuracy_log;
|
|
|
|
|
|
|
|
dst->symbols = malloc(size);
|
|
|
|
dst->num_bits = malloc(size);
|
|
|
|
dst->new_state_base = malloc(size * sizeof(u16));
|
|
|
|
if (!dst->symbols || !dst->num_bits || !dst->new_state_base) {
|
|
|
|
BAD_ALLOC();
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(dst->symbols, src->symbols, size);
|
|
|
|
memcpy(dst->num_bits, src->num_bits, size);
|
|
|
|
memcpy(dst->new_state_base, src->new_state_base, size * sizeof(u16));
|
|
|
|
}
|
|
|
|
/******* END FSE PRIMITIVES ***************************************************/
|