2015-11-27 10:27:11 +00:00
|
|
|
/* Copyright 2013 Google Inc. All Rights Reserved.
|
|
|
|
|
2015-12-11 10:11:51 +00:00
|
|
|
Distributed under MIT license.
|
2015-11-27 10:27:11 +00:00
|
|
|
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
|
|
|
*/
|
|
|
|
|
2013-10-23 11:06:13 +00:00
|
|
|
// Function to find backward reference copies.
|
|
|
|
|
|
|
|
#include "./backward_references.h"
|
|
|
|
|
|
|
|
#include <algorithm>
|
2015-06-29 12:20:25 +00:00
|
|
|
#include <limits>
|
2013-10-23 11:06:13 +00:00
|
|
|
#include <vector>
|
|
|
|
|
|
|
|
#include "./command.h"
|
2015-06-29 12:20:25 +00:00
|
|
|
#include "./fast_log.h"
|
2015-10-01 13:10:42 +00:00
|
|
|
#include "./literal_cost.h"
|
2013-10-23 11:06:13 +00:00
|
|
|
|
|
|
|
namespace brotli {
|
|
|
|
|
2016-01-27 08:50:39 +00:00
|
|
|
// The maximum length for which the zopflification uses distinct distances.
// Copies longer than this are collapsed to a single candidate length in the
// search to keep the optimizer fast.
static const uint16_t kMaxZopfliLen = 325;

// Sentinel cost for unreachable positions in the shortest-path search.
static const double kInfinity = std::numeric_limits<double>::infinity();
|
2015-06-12 14:25:41 +00:00
|
|
|
|
|
|
|
// Histogram based cost model for zopflification.
//
// Approximates the bit cost of literals, command symbols and distance
// symbols as Shannon entropy of histograms collected either from a previous
// iteration's commands (SetFromCommands) or from a static literal-cost
// estimate (SetFromLiteralCosts).
class ZopfliCostModel {
 public:
  ZopfliCostModel(void) : min_cost_cmd_(kInfinity) {}

  // Rebuilds the model from the commands produced by a previous iteration.
  // |position| is the absolute stream position of the first of |num_bytes|
  // input bytes; |last_insert_len| is the literal run pending before the
  // first command.
  void SetFromCommands(size_t num_bytes,
                       size_t position,
                       const uint8_t* ringbuffer,
                       size_t ringbuffer_mask,
                       const Command* commands,
                       size_t num_commands,
                       size_t last_insert_len) {
    std::vector<uint32_t> histogram_literal(256, 0);
    std::vector<uint32_t> histogram_cmd(kNumCommandPrefixes, 0);
    std::vector<uint32_t> histogram_dist(kNumDistancePrefixes, 0);

    size_t pos = position - last_insert_len;
    for (size_t i = 0; i < num_commands; i++) {
      size_t inslength = commands[i].insert_len_;
      size_t copylength = commands[i].copy_len_;
      size_t distcode = commands[i].dist_prefix_;
      size_t cmdcode = commands[i].cmd_prefix_;

      histogram_cmd[cmdcode]++;
      // Only commands with cmdcode >= 128 emit a distance symbol (mirrored
      // in GetCommandCost below).
      if (cmdcode >= 128) histogram_dist[distcode]++;

      for (size_t j = 0; j < inslength; j++) {
        histogram_literal[ringbuffer[(pos + j) & ringbuffer_mask]]++;
      }

      pos += inslength + copylength;
    }

    std::vector<double> cost_literal;
    Set(histogram_literal, &cost_literal);
    Set(histogram_cmd, &cost_cmd_);
    Set(histogram_dist, &cost_dist_);

    for (uint32_t i = 0; i < kNumCommandPrefixes; ++i) {
      min_cost_cmd_ = std::min(min_cost_cmd_, cost_cmd_[i]);
    }

    // Prefix sums: the cost of any literal range becomes one subtraction in
    // GetLiteralCosts.
    literal_costs_.resize(num_bytes + 1);
    literal_costs_[0] = 0.0;
    for (size_t i = 0; i < num_bytes; ++i) {
      literal_costs_[i + 1] = literal_costs_[i] +
          cost_literal[ringbuffer[(position + i) & ringbuffer_mask]];
    }
  }

  // Initializes the model for the first iteration, before any commands
  // exist: literal costs come from EstimateBitCostsForLiterals and the
  // symbol costs from a fixed, slowly growing FastLog2 schedule.
  void SetFromLiteralCosts(size_t num_bytes,
                           size_t position,
                           const uint8_t* ringbuffer,
                           size_t ringbuffer_mask) {
    std::vector<float> literal_cost(num_bytes + 1);
    EstimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask,
                                ringbuffer, &literal_cost[0]);
    literal_costs_.resize(num_bytes + 1);
    literal_costs_[0] = 0.0;
    for (size_t i = 0; i < num_bytes; ++i) {
      literal_costs_[i + 1] = literal_costs_[i] + literal_cost[i];
    }
    cost_cmd_.resize(kNumCommandPrefixes);
    cost_dist_.resize(kNumDistancePrefixes);
    for (uint32_t i = 0; i < kNumCommandPrefixes; ++i) {
      cost_cmd_[i] = FastLog2(11 + i);
    }
    for (uint32_t i = 0; i < kNumDistancePrefixes; ++i) {
      cost_dist_[i] = FastLog2(20 + i);
    }
    min_cost_cmd_ = FastLog2(11);
  }

  // Returns the modeled bit cost of one command with the given distance
  // code, copy length code and insert length, excluding its literals
  // (those are accounted for via GetLiteralCosts).
  double GetCommandCost(
      size_t dist_code, size_t length_code, size_t insert_length) const {
    uint16_t inscode = GetInsertLengthCode(insert_length);
    uint16_t copycode = GetCopyLengthCode(length_code);
    uint16_t cmdcode = CombineLengthCodes(inscode, copycode, dist_code == 0);
    uint16_t dist_symbol;
    uint32_t distextra;
    PrefixEncodeCopyDistance(dist_code, 0, 0, &dist_symbol, &distextra);
    uint32_t distnumextra = distextra >> 24;

    // Extra bits are emitted verbatim, so they cost exactly their count;
    // symbol costs come from the entropy model.
    double result = static_cast<double>(
        kInsExtra[inscode] + kCopyExtra[copycode] + distnumextra);
    result += cost_cmd_[cmdcode];
    if (cmdcode >= 128) result += cost_dist_[dist_symbol];
    return result;
  }

  // Modeled bit cost of the literals in stream range [from, to).
  double GetLiteralCosts(size_t from, size_t to) const {
    return literal_costs_[to] - literal_costs_[from];
  }

  // Smallest cost of any command symbol; used as a lower bound for pruning.
  double GetMinCostCmd(void) const {
    return min_cost_cmd_;
  }

 private:
  // Converts a histogram into per-symbol Shannon bit costs: unused symbols
  // get log2(sum) + 2 (slightly worse than any observed symbol) and no
  // symbol is modeled cheaper than 1 bit.
  void Set(const std::vector<uint32_t>& histogram, std::vector<double>* cost) {
    cost->resize(histogram.size());
    size_t sum = 0;
    for (size_t i = 0; i < histogram.size(); i++) {
      sum += histogram[i];
    }
    double log2sum = FastLog2(sum);
    for (size_t i = 0; i < histogram.size(); i++) {
      if (histogram[i] == 0) {
        (*cost)[i] = log2sum + 2;
        continue;
      }

      // Shannon bits for this symbol.
      (*cost)[i] = log2sum - FastLog2(histogram[i]);

      // Cannot be coded with less than 1 bit
      if ((*cost)[i] < 1) (*cost)[i] = 1;
    }
  }

  std::vector<double> cost_cmd_;  // The insert and copy length symbols.
  std::vector<double> cost_dist_;
  // Cumulative costs of literals per position in the stream.
  std::vector<double> literal_costs_;
  double min_cost_cmd_;
};
|
|
|
|
|
2016-01-07 15:27:49 +00:00
|
|
|
// Computes into |result_dist_cache| the four-slot distance cache that
// applies after a command with the given |distance| / |distance_code|.
// A match inside the window with a non-zero code pushes the new distance
// to the front; otherwise the previous cache is carried over unchanged.
inline void SetDistanceCache(size_t distance,
                             size_t distance_code,
                             size_t max_distance,
                             const int* dist_cache,
                             int* result_dist_cache) {
  const bool pushes_new_distance =
      (distance_code > 0) && (distance <= max_distance);
  if (!pushes_new_distance) {
    // Cache unchanged: propagate all four slots.
    memcpy(result_dist_cache, dist_cache, 4 * sizeof(dist_cache[0]));
    return;
  }
  // New distance goes first; the previous three entries shift down.
  result_dist_cache[0] = static_cast<int>(distance);
  memcpy(&result_dist_cache[1], dist_cache, 3 * sizeof(dist_cache[0]));
}
|
|
|
|
|
2016-01-07 15:27:49 +00:00
|
|
|
inline size_t ComputeDistanceCode(size_t distance,
|
|
|
|
size_t max_distance,
|
|
|
|
int quality,
|
|
|
|
const int* dist_cache) {
|
2015-06-12 14:25:41 +00:00
|
|
|
if (distance <= max_distance) {
|
2016-01-07 15:27:49 +00:00
|
|
|
if (distance == static_cast<size_t>(dist_cache[0])) {
|
2015-06-12 14:25:41 +00:00
|
|
|
return 0;
|
2016-01-07 15:27:49 +00:00
|
|
|
} else if (distance == static_cast<size_t>(dist_cache[1])) {
|
2015-06-12 14:25:41 +00:00
|
|
|
return 1;
|
2016-01-07 15:27:49 +00:00
|
|
|
} else if (distance == static_cast<size_t>(dist_cache[2])) {
|
2015-06-12 14:25:41 +00:00
|
|
|
return 2;
|
2016-01-07 15:27:49 +00:00
|
|
|
} else if (distance == static_cast<size_t>(dist_cache[3])) {
|
2015-06-12 14:25:41 +00:00
|
|
|
return 3;
|
|
|
|
} else if (quality > 3 && distance >= 6) {
|
2016-01-07 15:27:49 +00:00
|
|
|
for (size_t k = 4; k < kNumDistanceShortCodes; ++k) {
|
|
|
|
size_t idx = kDistanceCacheIndex[k];
|
|
|
|
size_t candidate =
|
|
|
|
static_cast<size_t>(dist_cache[idx] + kDistanceCacheOffset[k]);
|
|
|
|
static const size_t kLimits[16] = { 0, 0, 0, 0,
|
|
|
|
6, 6, 11, 11,
|
|
|
|
11, 11, 11, 11,
|
|
|
|
12, 12, 12, 12 };
|
2015-06-12 14:25:41 +00:00
|
|
|
if (distance == candidate && distance >= kLimits[k]) {
|
|
|
|
return k;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return distance + 15;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Dynamic-programming node: the best known way to reach one byte position
// in the shortest-path search performed by ZopfliIterate.
struct ZopfliNode {
  ZopfliNode() : length(1),
                 distance(0),
                 distance_code(0),
                 length_code(0),
                 insert_length(0),
                 cost(kInfinity) {}

  // best length to get up to this byte (not including this byte itself)
  uint32_t length;
  // distance associated with the length
  uint32_t distance;
  uint32_t distance_code;
  // Last four distances as of reaching this node (filled by
  // SetDistanceCache via UpdateZopfliNode).
  int distance_cache[4];
  // length code associated with the length - usually the same as length,
  // except in case of length-changing dictionary transformation.
  uint32_t length_code;
  // number of literal inserts before this copy
  uint32_t insert_length;
  // smallest cost to get to this byte from the beginning, as found so far
  double cost;
};
|
|
|
|
|
|
|
|
inline void UpdateZopfliNode(ZopfliNode* nodes, size_t pos, size_t start_pos,
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t len, size_t len_code, size_t dist,
|
|
|
|
size_t dist_code, size_t max_dist,
|
|
|
|
const int* dist_cache, double cost) {
|
2015-06-12 14:25:41 +00:00
|
|
|
ZopfliNode& next = nodes[pos + len];
|
2016-01-07 15:27:49 +00:00
|
|
|
next.length = static_cast<uint32_t>(len);
|
|
|
|
next.length_code = static_cast<uint32_t>(len_code);
|
|
|
|
next.distance = static_cast<uint32_t>(dist);
|
|
|
|
next.distance_code = static_cast<uint32_t>(dist_code);
|
|
|
|
next.insert_length = static_cast<uint32_t>(pos - start_pos);
|
2015-06-12 14:25:41 +00:00
|
|
|
next.cost = cost;
|
|
|
|
SetDistanceCache(dist, dist_code, max_dist, dist_cache,
|
|
|
|
&next.distance_cache[0]);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Maintains the smallest 2^k cost difference together with their positions
class StartPosQueue {
 public:
  // |bits| sets the capacity: 2^bits entries, kept in a circular buffer
  // sorted by cost difference.
  explicit StartPosQueue(int bits)
      : mask_((1u << bits) - 1), q_(1 << bits), idx_(0) {}

  void Clear(void) {
    idx_ = 0;
  }

  // Inserts (pos, costdiff), keeping the ring sorted; once full, each push
  // overwrites the oldest slot before re-sorting.
  void Push(size_t pos, double costdiff) {
    if (costdiff == kInfinity) {
      // We can't start a command from an unreachable start position.
      // E.g. position 1 in a stream is always unreachable, because all commands
      // have a copy of at least length 2.
      return;
    }
    // Newest element lands at the slot just "before" the logical window.
    size_t offset = -idx_ & mask_;
    ++idx_;
    size_t len = size();
    q_[offset] = std::make_pair(pos, costdiff);
    /* Restore the sorted order. In the list of |len| items at most |len - 1|
       adjacent element comparisons / swaps are required. */
    for (size_t i = 1; i < len; ++i) {
      if (q_[offset & mask_].second > q_[(offset + 1) & mask_].second) {
        std::swap(q_[offset & mask_], q_[(offset + 1) & mask_]);
      }
      ++offset;
    }
  }

  size_t size(void) const { return std::min(idx_, mask_ + 1); }

  // Returns the start position with the k-th smallest cost difference
  // (k = 0 is the cheapest).
  size_t GetStartPos(size_t k) const {
    return q_[(k + 1 - idx_) & mask_].first;
  }

 private:
  const size_t mask_;
  std::vector<std::pair<size_t, double> > q_;
  size_t idx_;  // total number of pushes since Clear(); also encodes the
                // ring's logical origin
};
|
|
|
|
|
|
|
|
// Returns the minimum possible copy length that can improve the cost of any
|
|
|
|
// future position.
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t ComputeMinimumCopyLength(const StartPosQueue& queue,
|
|
|
|
const std::vector<ZopfliNode>& nodes,
|
|
|
|
const ZopfliCostModel& model,
|
|
|
|
size_t pos,
|
|
|
|
double min_cost_cmd) {
|
2015-06-12 14:25:41 +00:00
|
|
|
// Compute the minimum possible cost of reaching any future position.
|
|
|
|
const size_t start0 = queue.GetStartPos(0);
|
|
|
|
double min_cost = (nodes[start0].cost +
|
|
|
|
model.GetLiteralCosts(start0, pos) +
|
|
|
|
min_cost_cmd);
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t len = 2;
|
|
|
|
size_t next_len_bucket = 4;
|
|
|
|
size_t next_len_offset = 10;
|
2015-06-12 14:25:41 +00:00
|
|
|
while (pos + len < nodes.size() && nodes[pos + len].cost <= min_cost) {
|
|
|
|
// We already reached (pos + len) with no more cost than the minimum
|
|
|
|
// possible cost of reaching anything from this pos, so there is no point in
|
|
|
|
// looking for lengths <= len.
|
|
|
|
++len;
|
|
|
|
if (len == next_len_offset) {
|
|
|
|
// We reached the next copy length code bucket, so we add one more
|
|
|
|
// extra bit to the minimum cost.
|
|
|
|
min_cost += 1.0;
|
|
|
|
next_len_offset += next_len_bucket;
|
|
|
|
next_len_bucket *= 2;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
// One zopfli iteration: runs a shortest-path search over ZopfliNodes (one
// node per input byte, edge costs from |model|) using the precomputed match
// candidates in |num_matches|/|matches|, then walks the optimal path
// backwards and emits it as commands. Appends to |commands| and updates
// |dist_cache|, |last_insert_len|, |num_commands| and |num_literals|.
void ZopfliIterate(size_t num_bytes,
                   size_t position,
                   const uint8_t* ringbuffer,
                   size_t ringbuffer_mask,
                   const size_t max_backward_limit,
                   const ZopfliCostModel& model,
                   const std::vector<uint32_t>& num_matches,
                   const std::vector<BackwardMatch>& matches,
                   int* dist_cache,
                   size_t* last_insert_len,
                   Command* commands,
                   size_t* num_commands,
                   size_t* num_literals) {
  const Command * const orig_commands = commands;

  // nodes[i] = best known way to reach byte i; node 0 is the start state
  // seeded with the incoming distance cache.
  std::vector<ZopfliNode> nodes(num_bytes + 1);
  nodes[0].length = 0;
  nodes[0].cost = 0;
  memcpy(nodes[0].distance_cache, dist_cache, 4 * sizeof(dist_cache[0]));

  StartPosQueue queue(3);
  const double min_cost_cmd = model.GetMinCostCmd();

  size_t cur_match_pos = 0;
  for (size_t i = 0; i + 3 < num_bytes; i++) {
    size_t cur_ix = position + i;
    size_t cur_ix_masked = cur_ix & ringbuffer_mask;
    size_t max_distance = std::min(cur_ix, max_backward_limit);
    size_t max_length = num_bytes - i;

    // Offer i as a command start; cost difference normalizes out the
    // literal prefix so starts are comparable.
    queue.Push(i, nodes[i].cost - model.GetLiteralCosts(0, i));

    const size_t min_len = ComputeMinimumCopyLength(queue, nodes, model,
                                                    i, min_cost_cmd);

    // Go over the command starting positions in order of increasing cost
    // difference.
    for (size_t k = 0; k < 5 && k < queue.size(); ++k) {
      const size_t start = queue.GetStartPos(k);
      const double start_costdiff =
          nodes[start].cost - model.GetLiteralCosts(0, start);
      const int* dist_cache2 = &nodes[start].distance_cache[0];

      // Look for last distance matches using the distance cache from this
      // starting position.
      size_t best_len = min_len - 1;
      for (size_t j = 0; j < kNumDistanceShortCodes; ++j) {
        const size_t idx = kDistanceCacheIndex[j];
        const size_t backward =
            static_cast<size_t>(dist_cache2[idx] + kDistanceCacheOffset[j]);
        size_t prev_ix = cur_ix - backward;
        // Wrapped subtraction / zero distance: no valid candidate.
        if (prev_ix >= cur_ix) {
          continue;
        }
        if (PREDICT_FALSE(backward > max_distance)) {
          continue;
        }
        prev_ix &= ringbuffer_mask;

        // Quick reject: the byte at best_len must match before a longer
        // match is even possible.
        if (cur_ix_masked + best_len > ringbuffer_mask ||
            prev_ix + best_len > ringbuffer_mask ||
            ringbuffer[cur_ix_masked + best_len] !=
            ringbuffer[prev_ix + best_len]) {
          continue;
        }
        const size_t len =
            FindMatchLengthWithLimit(&ringbuffer[prev_ix],
                                     &ringbuffer[cur_ix_masked],
                                     max_length);
        // Relax every length this candidate newly reaches.
        for (size_t l = best_len + 1; l <= len; ++l) {
          const size_t inslen = i - start;
          double cmd_cost = model.GetCommandCost(j, l, inslen);
          double cost = start_costdiff + cmd_cost + model.GetLiteralCosts(0, i);
          if (cost < nodes[i + l].cost) {
            UpdateZopfliNode(&nodes[0], i, start, l, l, backward, j,
                             max_distance, dist_cache2, cost);
          }
          best_len = l;
        }
      }

      // At higher iterations look only for new last distance matches, since
      // looking only for new command start positions with the same distances
      // does not help much.
      if (k >= 2) continue;

      // Loop through all possible copy lengths at this position.
      size_t len = min_len;
      for (size_t j = 0; j < num_matches[i]; ++j) {
        BackwardMatch match = matches[cur_match_pos + j];
        size_t dist = match.distance;
        bool is_dictionary_match = dist > max_distance;
        // We already tried all possible last distance matches, so we can use
        // normal distance code here.
        size_t dist_code = dist + 15;
        // Try all copy lengths up until the maximum copy length corresponding
        // to this distance. If the distance refers to the static dictionary, or
        // the maximum length is long enough, try only one maximum length.
        size_t max_len = match.length();
        if (len < max_len && (is_dictionary_match || max_len > kMaxZopfliLen)) {
          len = max_len;
        }
        for (; len <= max_len; ++len) {
          size_t len_code = is_dictionary_match ? match.length_code() : len;
          const size_t inslen = i - start;
          double cmd_cost = model.GetCommandCost(dist_code, len_code, inslen);
          double cost = start_costdiff + cmd_cost + model.GetLiteralCosts(0, i);
          if (cost < nodes[i + len].cost) {
            UpdateZopfliNode(&nodes[0], i, start, len, len_code, dist,
                             dist_code, max_distance, dist_cache2, cost);
          }
        }
      }
    }

    cur_match_pos += num_matches[i];

    // The zopflification can be too slow in case of very long lengths, so in
    // such case skip it all, it does not cost a lot of compression ratio.
    if (num_matches[i] == 1 &&
        matches[cur_match_pos - 1].length() > kMaxZopfliLen) {
      i += matches[cur_match_pos - 1].length() - 1;
      queue.Clear();
    }
  }

  // Backtrack from the last reachable position, collecting per-command
  // total lengths (insert + copy) in reverse order.
  std::vector<uint32_t> backwards;
  size_t index = num_bytes;
  while (nodes[index].cost == kInfinity) --index;
  while (index != 0) {
    size_t len = nodes[index].length + nodes[index].insert_length;
    backwards.push_back(static_cast<uint32_t>(len));
    index -= len;
  }

  // Reverse into forward order.
  std::vector<uint32_t> path;
  for (size_t i = backwards.size(); i > 0; i--) {
    path.push_back(backwards[i - 1]);
  }

  // Emit the chosen path as commands, updating the distance cache exactly
  // as the decoder will.
  size_t pos = 0;
  for (size_t i = 0; i < path.size(); i++) {
    const ZopfliNode& next = nodes[pos + path[i]];
    size_t copy_length = next.length;
    size_t insert_length = next.insert_length;
    pos += insert_length;
    if (i == 0) {
      // Fold the literal run carried over from the previous call into the
      // first command.
      insert_length += *last_insert_len;
      *last_insert_len = 0;
    }
    size_t distance = next.distance;
    size_t len_code = next.length_code;
    size_t max_distance = std::min(position + pos, max_backward_limit);
    bool is_dictionary = (distance > max_distance);
    size_t dist_code = next.distance_code;

    Command cmd(insert_length, copy_length, len_code, dist_code);
    *commands++ = cmd;

    if (!is_dictionary && dist_code > 0) {
      dist_cache[3] = dist_cache[2];
      dist_cache[2] = dist_cache[1];
      dist_cache[1] = dist_cache[0];
      dist_cache[0] = static_cast<int>(distance);
    }

    *num_literals += insert_length;
    insert_length = 0;
    pos += copy_length;
  }
  // Trailing literals become the pending insert for the next call.
  *last_insert_len += num_bytes - pos;
  *num_commands += static_cast<size_t>(commands - orig_commands);
}
|
|
|
|
|
|
|
|
template<typename Hasher>
|
2013-11-15 18:02:17 +00:00
|
|
|
void CreateBackwardReferences(size_t num_bytes,
|
|
|
|
size_t position,
|
2016-01-07 15:27:49 +00:00
|
|
|
bool is_last,
|
2013-11-15 18:02:17 +00:00
|
|
|
const uint8_t* ringbuffer,
|
|
|
|
size_t ringbuffer_mask,
|
2014-10-28 12:25:22 +00:00
|
|
|
const int quality,
|
2016-01-27 08:50:39 +00:00
|
|
|
const int lgwin,
|
2013-11-15 18:02:17 +00:00
|
|
|
Hasher* hasher,
|
2014-10-28 12:25:22 +00:00
|
|
|
int* dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t* last_insert_len,
|
2014-10-28 12:25:22 +00:00
|
|
|
Command* commands,
|
2015-10-28 16:44:47 +00:00
|
|
|
size_t* num_commands,
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t* num_literals) {
|
2016-01-27 08:50:39 +00:00
|
|
|
// Set maximum distance, see section 9.1. of the spec.
|
|
|
|
const size_t max_backward_limit = (1 << lgwin) - 16;
|
|
|
|
|
2016-01-07 16:10:34 +00:00
|
|
|
// Choose which init method is faster.
|
|
|
|
// memset is about 100 times faster than hasher->InitForData().
|
|
|
|
const size_t kMaxBytesForPartialHashInit = Hasher::kHashMapSize >> 7;
|
|
|
|
if (position == 0 && is_last && num_bytes <= kMaxBytesForPartialHashInit) {
|
|
|
|
hasher->InitForData(ringbuffer, num_bytes);
|
|
|
|
} else {
|
|
|
|
hasher->Init();
|
|
|
|
}
|
2014-10-28 12:25:22 +00:00
|
|
|
if (num_bytes >= 3 && position >= 3) {
|
|
|
|
// Prepare the hashes for three last bytes of the last write.
|
|
|
|
// These could not be calculated before, since they require knowledge
|
|
|
|
// of both the previous and the current block.
|
|
|
|
hasher->Store(&ringbuffer[(position - 3) & ringbuffer_mask],
|
2015-10-28 16:44:47 +00:00
|
|
|
static_cast<uint32_t>(position - 3));
|
2014-10-28 12:25:22 +00:00
|
|
|
hasher->Store(&ringbuffer[(position - 2) & ringbuffer_mask],
|
2015-10-28 16:44:47 +00:00
|
|
|
static_cast<uint32_t>(position - 2));
|
2014-10-28 12:25:22 +00:00
|
|
|
hasher->Store(&ringbuffer[(position - 1) & ringbuffer_mask],
|
2015-10-28 16:44:47 +00:00
|
|
|
static_cast<uint32_t>(position - 1));
|
2014-10-28 12:25:22 +00:00
|
|
|
}
|
|
|
|
const Command * const orig_commands = commands;
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t insert_length = *last_insert_len;
|
2013-11-15 18:02:17 +00:00
|
|
|
size_t i = position & ringbuffer_mask;
|
2015-10-26 16:08:57 +00:00
|
|
|
const size_t i_diff = position - i;
|
2013-11-15 18:02:17 +00:00
|
|
|
const size_t i_end = i + num_bytes;
|
2013-10-23 11:06:13 +00:00
|
|
|
|
2014-10-28 12:25:22 +00:00
|
|
|
// For speed up heuristics for random data.
|
2015-10-28 16:44:47 +00:00
|
|
|
const size_t random_heuristics_window_size = quality < 9 ? 64 : 512;
|
|
|
|
size_t apply_random_heuristics = i + random_heuristics_window_size;
|
2014-03-20 13:32:35 +00:00
|
|
|
|
2015-06-12 14:50:49 +00:00
|
|
|
// Minimum score to accept a backward reference.
|
|
|
|
const int kMinScore = 4.0;
|
|
|
|
|
2015-08-28 14:09:23 +00:00
|
|
|
while (i + Hasher::kHashTypeLength - 1 < i_end) {
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t max_length = i_end - i;
|
|
|
|
size_t max_distance = std::min(i + i_diff, max_backward_limit);
|
|
|
|
size_t best_len = 0;
|
|
|
|
size_t best_len_code = 0;
|
|
|
|
size_t best_dist = 0;
|
2015-06-12 14:50:49 +00:00
|
|
|
double best_score = kMinScore;
|
2013-10-23 11:06:13 +00:00
|
|
|
bool match_found = hasher->FindLongestMatch(
|
2014-10-28 12:25:22 +00:00
|
|
|
ringbuffer, ringbuffer_mask,
|
2015-10-28 16:44:47 +00:00
|
|
|
dist_cache, static_cast<uint32_t>(i + i_diff), max_length, max_distance,
|
2014-10-28 12:25:22 +00:00
|
|
|
&best_len, &best_len_code, &best_dist, &best_score);
|
2013-10-23 11:06:13 +00:00
|
|
|
if (match_found) {
|
2015-06-12 14:25:41 +00:00
|
|
|
// Found a match. Let's look for something even better ahead.
|
|
|
|
int delayed_backward_references_in_row = 0;
|
|
|
|
for (;;) {
|
|
|
|
--max_length;
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t best_len_2 =
|
|
|
|
quality < 5 ? std::min(best_len - 1, max_length) : 0;
|
|
|
|
size_t best_len_code_2 = 0;
|
|
|
|
size_t best_dist_2 = 0;
|
2015-06-12 14:50:49 +00:00
|
|
|
double best_score_2 = kMinScore;
|
2016-01-07 15:27:49 +00:00
|
|
|
max_distance = std::min(i + i_diff + 1, max_backward_limit);
|
2015-06-12 14:25:41 +00:00
|
|
|
match_found = hasher->FindLongestMatch(
|
|
|
|
ringbuffer, ringbuffer_mask,
|
2015-10-28 16:44:47 +00:00
|
|
|
dist_cache, static_cast<uint32_t>(i + i_diff + 1),
|
|
|
|
max_length, max_distance,
|
2015-06-12 14:25:41 +00:00
|
|
|
&best_len_2, &best_len_code_2, &best_dist_2, &best_score_2);
|
|
|
|
double cost_diff_lazy = 7.0;
|
|
|
|
if (match_found && best_score_2 >= best_score + cost_diff_lazy) {
|
|
|
|
// Ok, let's just write one byte for now and start a match from the
|
2014-02-14 14:04:23 +00:00
|
|
|
// next byte.
|
2015-06-12 14:25:41 +00:00
|
|
|
++i;
|
|
|
|
++insert_length;
|
|
|
|
best_len = best_len_2;
|
|
|
|
best_len_code = best_len_code_2;
|
|
|
|
best_dist = best_dist_2;
|
|
|
|
best_score = best_score_2;
|
|
|
|
if (++delayed_backward_references_in_row < 4) {
|
|
|
|
continue;
|
2014-02-14 14:04:23 +00:00
|
|
|
}
|
2013-10-23 11:06:13 +00:00
|
|
|
}
|
2015-06-12 14:25:41 +00:00
|
|
|
break;
|
2013-10-23 11:06:13 +00:00
|
|
|
}
|
2014-03-20 13:32:35 +00:00
|
|
|
apply_random_heuristics =
|
|
|
|
i + 2 * best_len + random_heuristics_window_size;
|
2016-01-07 15:27:49 +00:00
|
|
|
max_distance = std::min(i + i_diff, max_backward_limit);
|
2015-05-07 15:40:00 +00:00
|
|
|
// The first 16 codes are special shortcodes, and the minimum offset is 1.
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t distance_code =
|
2015-06-12 14:25:41 +00:00
|
|
|
ComputeDistanceCode(best_dist, max_distance, quality, dist_cache);
|
|
|
|
if (best_dist <= max_distance && distance_code > 0) {
|
|
|
|
dist_cache[3] = dist_cache[2];
|
|
|
|
dist_cache[2] = dist_cache[1];
|
|
|
|
dist_cache[1] = dist_cache[0];
|
2016-01-07 15:27:49 +00:00
|
|
|
dist_cache[0] = static_cast<int>(best_dist);
|
2014-03-20 13:32:35 +00:00
|
|
|
}
|
2014-10-28 12:25:22 +00:00
|
|
|
Command cmd(insert_length, best_len, best_len_code, distance_code);
|
|
|
|
*commands++ = cmd;
|
2015-04-28 08:12:47 +00:00
|
|
|
*num_literals += insert_length;
|
2014-10-28 12:25:22 +00:00
|
|
|
insert_length = 0;
|
2015-06-12 14:25:41 +00:00
|
|
|
// Put the hash keys into the table, if there are enough
|
|
|
|
// bytes left.
|
2016-01-27 08:50:39 +00:00
|
|
|
for (size_t j = 2; j < best_len; ++j) {
|
2015-10-28 16:44:47 +00:00
|
|
|
hasher->Store(&ringbuffer[i + j],
|
|
|
|
static_cast<uint32_t>(i + i_diff + j));
|
2014-02-14 14:04:23 +00:00
|
|
|
}
|
2015-06-12 14:25:41 +00:00
|
|
|
i += best_len;
|
2013-10-23 11:06:13 +00:00
|
|
|
} else {
|
|
|
|
++insert_length;
|
|
|
|
++i;
|
2014-03-20 13:32:35 +00:00
|
|
|
// If we have not seen matches for a long time, we can skip some
|
|
|
|
// match lookups. Unsuccessful match lookups are very very expensive
|
|
|
|
// and this kind of a heuristic speeds up compression quite
|
|
|
|
// a lot.
|
|
|
|
if (i > apply_random_heuristics) {
|
|
|
|
// Going through uncompressible data, jump.
|
|
|
|
if (i > apply_random_heuristics + 4 * random_heuristics_window_size) {
|
|
|
|
// It is quite a long time since we saw a copy, so we assume
|
|
|
|
// that this data is not compressible, and store hashes less
|
|
|
|
// often. Hashes of non compressible data are less likely to
|
|
|
|
// turn out to be useful in the future, too, so we store less of
|
|
|
|
// them to not to flood out the hash table of good compressible
|
|
|
|
// data.
|
2015-10-28 16:44:47 +00:00
|
|
|
size_t i_jump = std::min(i + 16, i_end - 4);
|
2014-03-20 13:32:35 +00:00
|
|
|
for (; i < i_jump; i += 4) {
|
2015-10-28 16:44:47 +00:00
|
|
|
hasher->Store(ringbuffer + i, static_cast<uint32_t>(i + i_diff));
|
2014-03-20 13:32:35 +00:00
|
|
|
insert_length += 4;
|
|
|
|
}
|
|
|
|
} else {
|
2015-10-28 16:44:47 +00:00
|
|
|
size_t i_jump = std::min(i + 8, i_end - 3);
|
2014-03-20 13:32:35 +00:00
|
|
|
for (; i < i_jump; i += 2) {
|
2015-10-28 16:44:47 +00:00
|
|
|
hasher->Store(ringbuffer + i, static_cast<uint32_t>(i + i_diff));
|
2014-03-20 13:32:35 +00:00
|
|
|
insert_length += 2;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-10-23 11:06:13 +00:00
|
|
|
}
|
|
|
|
}
|
2016-01-07 15:27:49 +00:00
|
|
|
insert_length += i_end - i;
|
2014-10-28 12:25:22 +00:00
|
|
|
*last_insert_len = insert_length;
|
2016-01-07 15:27:49 +00:00
|
|
|
*num_commands += static_cast<size_t>(commands - orig_commands);
|
2013-10-23 11:06:13 +00:00
|
|
|
}
|
|
|
|
|
2014-03-20 13:32:35 +00:00
|
|
|
void CreateBackwardReferences(size_t num_bytes,
|
|
|
|
size_t position,
|
2016-01-07 15:27:49 +00:00
|
|
|
bool is_last,
|
2014-03-20 13:32:35 +00:00
|
|
|
const uint8_t* ringbuffer,
|
|
|
|
size_t ringbuffer_mask,
|
2014-10-28 12:25:22 +00:00
|
|
|
const int quality,
|
2016-01-27 08:50:39 +00:00
|
|
|
const int lgwin,
|
2014-03-20 13:32:35 +00:00
|
|
|
Hashers* hashers,
|
2014-10-28 12:25:22 +00:00
|
|
|
int hash_type,
|
|
|
|
int* dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t* last_insert_len,
|
2014-10-28 12:25:22 +00:00
|
|
|
Command* commands,
|
2015-10-28 16:44:47 +00:00
|
|
|
size_t* num_commands,
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t* num_literals) {
|
2015-06-12 14:25:41 +00:00
|
|
|
bool zopflify = quality > 9;
|
|
|
|
if (zopflify) {
|
2016-01-27 08:50:39 +00:00
|
|
|
Hashers::H10* hasher = hashers->hash_h10;
|
|
|
|
hasher->Init(lgwin, position, num_bytes, is_last);
|
|
|
|
if (num_bytes >= 3 && position >= kMaxTreeCompLength) {
|
|
|
|
// Store the last `kMaxTreeCompLength - 1` positions in the hasher.
|
2015-06-12 14:25:41 +00:00
|
|
|
// These could not be calculated before, since they require knowledge
|
|
|
|
// of both the previous and the current block.
|
2016-01-27 08:50:39 +00:00
|
|
|
for (size_t i = position - kMaxTreeCompLength + 1; i < position; ++i) {
|
|
|
|
hasher->Store(ringbuffer, ringbuffer_mask, i, num_bytes + position - i);
|
|
|
|
}
|
2015-06-12 14:25:41 +00:00
|
|
|
}
|
2016-01-27 08:50:39 +00:00
|
|
|
// Set maximum distance, see section 9.1. of the spec.
|
|
|
|
const size_t max_backward_limit = (1 << lgwin) - 16;
|
2016-01-07 15:27:49 +00:00
|
|
|
std::vector<uint32_t> num_matches(num_bytes);
|
2016-01-27 08:50:39 +00:00
|
|
|
std::vector<BackwardMatch> matches(4 * num_bytes);
|
2015-06-12 14:25:41 +00:00
|
|
|
size_t cur_match_pos = 0;
|
|
|
|
for (size_t i = 0; i + 3 < num_bytes; ++i) {
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t max_distance = std::min(position + i, max_backward_limit);
|
|
|
|
size_t max_length = num_bytes - i;
|
2016-01-27 08:50:39 +00:00
|
|
|
// Ensure that we have enough free slots.
|
|
|
|
if (matches.size() < cur_match_pos + Hashers::H10::kMaxNumMatches) {
|
|
|
|
matches.resize(cur_match_pos + Hashers::H10::kMaxNumMatches);
|
2015-06-12 14:25:41 +00:00
|
|
|
}
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t num_found_matches = hasher->FindAllMatches(
|
|
|
|
ringbuffer, ringbuffer_mask, position + i, max_length, max_distance,
|
|
|
|
&matches[cur_match_pos]);
|
2016-01-27 08:50:39 +00:00
|
|
|
const size_t cur_match_end = cur_match_pos + num_found_matches;
|
|
|
|
for (size_t j = cur_match_pos; j + 1 < cur_match_end; ++j) {
|
|
|
|
assert(matches[j].length() < matches[j + 1].length());
|
|
|
|
assert(matches[j].distance > max_distance ||
|
|
|
|
matches[j].distance <= matches[j + 1].distance);
|
|
|
|
}
|
2016-01-07 15:27:49 +00:00
|
|
|
num_matches[i] = static_cast<uint32_t>(num_found_matches);
|
2016-01-27 08:50:39 +00:00
|
|
|
if (num_found_matches > 0) {
|
|
|
|
const size_t match_len = matches[cur_match_end - 1].length();
|
2015-06-12 14:25:41 +00:00
|
|
|
if (match_len > kMaxZopfliLen) {
|
2016-01-27 08:50:39 +00:00
|
|
|
matches[cur_match_pos++] = matches[cur_match_end - 1];
|
|
|
|
num_matches[i] = 1;
|
2016-01-07 15:27:49 +00:00
|
|
|
for (size_t j = 1; j < match_len; ++j) {
|
2015-06-12 14:25:41 +00:00
|
|
|
++i;
|
2016-01-27 08:50:39 +00:00
|
|
|
if (match_len - j < 64) {
|
|
|
|
hasher->Store(ringbuffer, ringbuffer_mask, position + i,
|
|
|
|
num_bytes - i);
|
|
|
|
}
|
2015-06-12 14:25:41 +00:00
|
|
|
num_matches[i] = 0;
|
|
|
|
}
|
2016-01-27 08:50:39 +00:00
|
|
|
} else {
|
|
|
|
cur_match_pos = cur_match_end;
|
2015-06-12 14:25:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-01-07 15:27:49 +00:00
|
|
|
size_t orig_num_literals = *num_literals;
|
|
|
|
size_t orig_last_insert_len = *last_insert_len;
|
2015-06-12 14:25:41 +00:00
|
|
|
int orig_dist_cache[4] = {
|
|
|
|
dist_cache[0], dist_cache[1], dist_cache[2], dist_cache[3]
|
|
|
|
};
|
2015-10-28 16:44:47 +00:00
|
|
|
size_t orig_num_commands = *num_commands;
|
2016-01-07 15:27:49 +00:00
|
|
|
static const size_t kIterations = 2;
|
|
|
|
for (size_t i = 0; i < kIterations; i++) {
|
2015-06-12 14:25:41 +00:00
|
|
|
ZopfliCostModel model;
|
|
|
|
if (i == 0) {
|
|
|
|
model.SetFromLiteralCosts(num_bytes, position,
|
2015-10-01 13:10:42 +00:00
|
|
|
ringbuffer, ringbuffer_mask);
|
2015-06-12 14:25:41 +00:00
|
|
|
} else {
|
|
|
|
model.SetFromCommands(num_bytes, position,
|
|
|
|
ringbuffer, ringbuffer_mask,
|
|
|
|
commands, *num_commands - orig_num_commands,
|
|
|
|
orig_last_insert_len);
|
|
|
|
}
|
|
|
|
*num_commands = orig_num_commands;
|
|
|
|
*num_literals = orig_num_literals;
|
|
|
|
*last_insert_len = orig_last_insert_len;
|
|
|
|
memcpy(dist_cache, orig_dist_cache, 4 * sizeof(dist_cache[0]));
|
|
|
|
ZopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask,
|
2015-06-12 14:50:49 +00:00
|
|
|
max_backward_limit, model, num_matches, matches, dist_cache,
|
2015-06-12 14:25:41 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-03-20 13:32:35 +00:00
|
|
|
switch (hash_type) {
|
2014-10-28 12:25:22 +00:00
|
|
|
case 2:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H2>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h2, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-10-28 12:25:22 +00:00
|
|
|
break;
|
|
|
|
case 3:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H3>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h3, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-10-28 12:25:22 +00:00
|
|
|
break;
|
|
|
|
case 4:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H4>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h4, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-03-20 13:32:35 +00:00
|
|
|
break;
|
2014-10-28 12:25:22 +00:00
|
|
|
case 5:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H5>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h5, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-10-28 12:25:22 +00:00
|
|
|
break;
|
|
|
|
case 6:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H6>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h6, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-10-28 12:25:22 +00:00
|
|
|
break;
|
|
|
|
case 7:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H7>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h7, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-10-28 12:25:22 +00:00
|
|
|
break;
|
|
|
|
case 8:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H8>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h8, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-10-28 12:25:22 +00:00
|
|
|
break;
|
|
|
|
case 9:
|
2015-06-12 14:25:41 +00:00
|
|
|
CreateBackwardReferences<Hashers::H9>(
|
2016-01-07 15:27:49 +00:00
|
|
|
num_bytes, position, is_last, ringbuffer, ringbuffer_mask,
|
2016-01-27 08:50:39 +00:00
|
|
|
quality, lgwin, hashers->hash_h9, dist_cache,
|
2016-01-07 15:27:49 +00:00
|
|
|
last_insert_len, commands, num_commands, num_literals);
|
2014-03-20 13:32:35 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-23 11:06:13 +00:00
|
|
|
} // namespace brotli
|