/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Sliding window over the input data. */

#ifndef BROTLI_ENC_RINGBUFFER_H_
#define BROTLI_ENC_RINGBUFFER_H_

#include <string.h>  /* memcpy */

#include <brotli/types.h>

#include "../common/platform.h"
#include "memory.h"
#include "quality.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* A RingBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to:
     `position() % (1 << window_bits)'.
   For convenience, the RingBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
typedef struct RingBuffer {
  /* Size of the ring-buffer is (1 << window_bits) + tail_size_. */
  const uint32_t size_;
  const uint32_t mask_;
  const uint32_t tail_size_;
  const uint32_t total_size_;

  uint32_t cur_size_;
  /* Position to write in the ring buffer. */
  uint32_t pos_;
  /* The actual ring buffer containing the copy of the last two bytes,
     the data, and the copy of the beginning as a tail. */
  uint8_t* data_;
  /* The start of the ring-buffer. */
  uint8_t* buffer_;
} RingBuffer;
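
/* Illustrative sketch, with hypothetical parameters (not taken from the
   encoder): window_bits = 22 and tail_bits = 16 would give
     size_       = 1 << 22          (4 MiB circular window),
     mask_       = (1 << 22) - 1,
     tail_size_  = 1 << 16          (64 KiB mirrored after the window),
     total_size_ = size_ + tail_size_,
   with 2 prefix bytes and 7 slack bytes added on top of the data size by
   RingBufferInitBuffer below. */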

static BROTLI_INLINE void RingBufferInit(RingBuffer* rb) {
  rb->cur_size_ = 0;
  rb->pos_ = 0;
  rb->data_ = 0;
  rb->buffer_ = 0;
}

static BROTLI_INLINE void RingBufferSetup(
    const BrotliEncoderParams* params, RingBuffer* rb) {
  int window_bits = ComputeRbBits(params);
  int tail_bits = params->lgblock;
  *(uint32_t*)&rb->size_ = 1u << window_bits;
  *(uint32_t*)&rb->mask_ = (1u << window_bits) - 1;
  *(uint32_t*)&rb->tail_size_ = 1u << tail_bits;
  *(uint32_t*)&rb->total_size_ = rb->size_ + rb->tail_size_;
}
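
/* Note on the casts above: size_, mask_, tail_size_ and total_size_ are
   declared const, so they cannot be assigned directly; RingBufferSetup
   writes them exactly once through a uint32_t* cast, and they are treated
   as immutable for the rest of the RingBuffer's lifetime. */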

static BROTLI_INLINE void RingBufferFree(MemoryManager* m, RingBuffer* rb) {
  BROTLI_FREE(m, rb->data_);
}
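
/* Typical lifecycle, as a minimal sketch (assumes a MemoryManager `m` and
   BrotliEncoderParams `params` that the caller has already initialized;
   `input' and `input_size' are placeholder names):

     RingBuffer rb;
     RingBufferInit(&rb);
     RingBufferSetup(params, &rb);
     RingBufferWrite(m, input, input_size, &rb);  (defined below)
     if (!BROTLI_IS_OOM(m)) {
       ... read the window through rb.buffer_ and rb.pos_ ...
     }
     RingBufferFree(m, &rb);
*/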

/* Allocates or re-allocates data_ to the given length plus some slack
   region before and after. Fills the slack regions with zeros. */
static BROTLI_INLINE void RingBufferInitBuffer(
    MemoryManager* m, const uint32_t buflen, RingBuffer* rb) {
  static const size_t kSlackForEightByteHashingEverywhere = 7;
  uint8_t* new_data = BROTLI_ALLOC(
      m, uint8_t, 2 + buflen + kSlackForEightByteHashingEverywhere);
  size_t i;
  if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(new_data)) return;
  if (rb->data_) {
    memcpy(new_data, rb->data_,
        2 + rb->cur_size_ + kSlackForEightByteHashingEverywhere);
    BROTLI_FREE(m, rb->data_);
  }
  rb->data_ = new_data;
  rb->cur_size_ = buflen;
  rb->buffer_ = rb->data_ + 2;
  rb->buffer_[-2] = rb->buffer_[-1] = 0;
  for (i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {
    rb->buffer_[rb->cur_size_ + i] = 0;
  }
}
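
/* Layout produced by RingBufferInitBuffer, for reference:

     data_: [ 2 prefix bytes | buflen data bytes | 7 slack bytes ]
                             ^
                             buffer_ = data_ + 2

   The prefix holds the mirrored copy of the last two window bytes
   (buffer_[-1] and buffer_[-2]); the slack lets code that hashes 8 bytes
   at a time read past the last valid position without leaving the
   allocation, which is what kSlackForEightByteHashingEverywhere accounts
   for. */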

static BROTLI_INLINE void RingBufferWriteTail(
    const uint8_t* bytes, size_t n, RingBuffer* rb) {
  const size_t masked_pos = rb->pos_ & rb->mask_;
  if (BROTLI_PREDICT_FALSE(masked_pos < rb->tail_size_)) {
    /* Just fill the tail buffer with the beginning data. */
    const size_t p = rb->size_ + masked_pos;
    memcpy(&rb->buffer_[p], bytes,
        BROTLI_MIN(size_t, n, rb->tail_size_ - masked_pos));
  }
}
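
/* Mirroring sketch with hypothetical numbers: for size_ = 1 << 22,
   tail_size_ = 1 << 16, masked_pos = 10 and n = 100, the memcpy above
   copies min(100, 65536 - 10) = 100 bytes to buffer_[size_ + 10], so
   buffer_[i] == buffer_[i + size_] continues to hold for i < tail_size_
   once the main write lands at buffer_[10]. */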

/* Push bytes into the ring buffer. */
static BROTLI_INLINE void RingBufferWrite(
    MemoryManager* m, const uint8_t* bytes, size_t n, RingBuffer* rb) {
  if (rb->pos_ == 0 && n < rb->tail_size_) {
    /* Special case for the first write: to process the first block, we don't
       need to allocate the whole ring-buffer and we don't need the tail
       either. However, we do this memory usage optimization only if the
       first write is less than the tail size, which is also the input block
       size, otherwise it is likely that other blocks will follow and we
       will need to reallocate to the full size anyway. */
    rb->pos_ = (uint32_t)n;
    RingBufferInitBuffer(m, rb->pos_, rb);
    if (BROTLI_IS_OOM(m)) return;
    memcpy(rb->buffer_, bytes, n);
    return;
  }
  if (rb->cur_size_ < rb->total_size_) {
    /* Lazily allocate the full buffer. */
    RingBufferInitBuffer(m, rb->total_size_, rb);
    if (BROTLI_IS_OOM(m)) return;
    /* Initialize the last two bytes to zero, so that we don't have to worry
       later when we copy the last two bytes to the first two positions. */
    rb->buffer_[rb->size_ - 2] = 0;
    rb->buffer_[rb->size_ - 1] = 0;
    /* Initialize tail; might be touched by "best_len++" optimization when
       ring buffer is "full". */
    rb->buffer_[rb->size_] = 241;
  }
  {
    const size_t masked_pos = rb->pos_ & rb->mask_;
    /* The length of the writes is limited so that we do not need to worry
       about a write wrapping around the ring buffer more than once. */
    RingBufferWriteTail(bytes, n, rb);
    if (BROTLI_PREDICT_TRUE(masked_pos + n <= rb->size_)) {
      /* A single write fits. */
      memcpy(&rb->buffer_[masked_pos], bytes, n);
    } else {
      /* Split into two writes.
         Copy into the end of the buffer, including the tail buffer. */
      memcpy(&rb->buffer_[masked_pos], bytes,
          BROTLI_MIN(size_t, n, rb->total_size_ - masked_pos));
      /* Copy into the beginning of the buffer. */
      memcpy(&rb->buffer_[0], bytes + (rb->size_ - masked_pos),
          n - (rb->size_ - masked_pos));
    }
  }
  {
    BROTLI_BOOL not_first_lap = (rb->pos_ & (1u << 31)) != 0;
    uint32_t rb_pos_mask = (1u << 31) - 1;
    rb->buffer_[-2] = rb->buffer_[rb->size_ - 2];
    rb->buffer_[-1] = rb->buffer_[rb->size_ - 1];
    rb->pos_ = (rb->pos_ & rb_pos_mask) + (uint32_t)(n & rb_pos_mask);
    if (not_first_lap) {
      /* Wrap, but preserve not-a-first-lap feature. */
      rb->pos_ |= 1u << 31;
    }
  }
}
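
/* Wrap-around sketch with hypothetical numbers: for size_ = 8,
   tail_size_ = 4, masked_pos = 6 and n = 5, the split path copies all
   5 bytes to buffer_[6..10] (indices 8..10 fall in the tail mirror) and
   then bytes 2..4 to buffer_[0..2], keeping the mirror consistent. Bit 31
   of pos_ serves as the "not first lap" flag: once the running position
   overflows 31 bits it stays set, so later code can tell a buffer that
   has already wrapped from one still on its first pass. */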

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif

#endif  /* BROTLI_ENC_RINGBUFFER_H_ */