Mirror of https://github.com/google/brotli.git (synced 2024-11-22 11:40:06 +00:00)
Merge pull request #245 from szabadka/master
Fix -Wconversion and -pedantic-errors for the encoder.
This commit is contained in commit 24469b81d6.
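The diff below applies two recurring fixes throughout the encoder: loop indices compared against size_t bounds become size_t themselves, and intentional narrowing conversions are spelled out with static_cast. A minimal sketch of both patterns, for orientation only (illustrative code with made-up names, not part of the commit):

#include <cstddef>
#include <cstdint>
#include <vector>

// Pattern 1: an index compared against std::vector::size() (a size_t) is
// itself declared size_t, which silences -Wconversion and sign-compare
// warnings without any cast.
int Sum(const std::vector<int>& v) {
  int total = 0;
  for (size_t i = 0; i < v.size(); ++i) {  // was: for (int i = ...)
    total += v[i];
  }
  return total;
}

// Pattern 2: a narrowing conversion that is known to be in range is made
// explicit, so the compiler no longer flags it and the intent is documented.
uint8_t LowByte(size_t position) {
  return static_cast<uint8_t>(position & 0xff);  // was an implicit narrowing
}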
@@ -38,14 +38,14 @@ class ZopfliCostModel {
 const uint8_t* ringbuffer,
 size_t ringbuffer_mask,
 const Command* commands,
-int num_commands,
+size_t num_commands,
 int last_insert_len) {
 std::vector<int> histogram_literal(256, 0);
 std::vector<int> histogram_cmd(kNumCommandPrefixes, 0);
 std::vector<int> histogram_dist(kNumDistancePrefixes, 0);
 
 size_t pos = position - last_insert_len;
-for (int i = 0; i < num_commands; i++) {
+for (size_t i = 0; i < num_commands; i++) {
 int inslength = commands[i].insert_len_;
 int copylength = commands[i].copy_len_;
 int distcode = commands[i].dist_prefix_;
@@ -72,7 +72,7 @@ class ZopfliCostModel {
 
 literal_costs_.resize(num_bytes + 1);
 literal_costs_[0] = 0.0;
-for (int i = 0; i < num_bytes; ++i) {
+for (size_t i = 0; i < num_bytes; ++i) {
 literal_costs_[i + 1] = literal_costs_[i] +
 cost_literal[ringbuffer[(position + i) & ringbuffer_mask]];
 }
@@ -87,7 +87,7 @@ class ZopfliCostModel {
 ringbuffer, &literal_cost[0]);
 literal_costs_.resize(num_bytes + 1);
 literal_costs_[0] = 0.0;
-for (int i = 0; i < num_bytes; ++i) {
+for (size_t i = 0; i < num_bytes; ++i) {
 literal_costs_[i + 1] = literal_costs_[i] + literal_cost[i];
 }
 cost_cmd_.resize(kNumCommandPrefixes);
@@ -103,23 +103,21 @@ class ZopfliCostModel {
 
 double GetCommandCost(
 int dist_code, int length_code, int insert_length) const {
-int inscode = GetInsertLengthCode(insert_length);
-int copycode = GetCopyLengthCode(length_code);
-uint16_t cmdcode = CombineLengthCodes(inscode, copycode, dist_code);
-uint64_t insnumextra = insextra[inscode];
-uint64_t copynumextra = copyextra[copycode];
+uint16_t inscode = GetInsertLengthCode(insert_length);
+uint16_t copycode = GetCopyLengthCode(length_code);
+uint16_t cmdcode = CombineLengthCodes(inscode, copycode, dist_code == 0);
 uint16_t dist_symbol;
 uint32_t distextra;
 PrefixEncodeCopyDistance(dist_code, 0, 0, &dist_symbol, &distextra);
 uint32_t distnumextra = distextra >> 24;
 
-double result = insnumextra + copynumextra + distnumextra;
+double result = insextra[inscode] + copyextra[copycode] + distnumextra;
 result += cost_cmd_[cmdcode];
 if (cmdcode >= 128) result += cost_dist_[dist_symbol];
 return result;
 }
 
-double GetLiteralCosts(int from, int to) const {
+double GetLiteralCosts(size_t from, size_t to) const {
 return literal_costs_[to] - literal_costs_[from];
 }
 
@@ -231,7 +229,7 @@ inline void UpdateZopfliNode(ZopfliNode* nodes, size_t pos, size_t start_pos,
 next.length_code = len_code;
 next.distance = dist;
 next.distance_code = dist_code;
-next.insert_length = pos - start_pos;
+next.insert_length = static_cast<int>(pos - start_pos);
 next.cost = cost;
 SetDistanceCache(dist, dist_code, max_dist, dist_cache,
 &next.distance_cache[0]);
@@ -264,7 +262,7 @@ class StartPosQueue {
 ++idx_;
 }
 
-int size() const { return std::min<int>(idx_, mask_ + 1); }
+int size() const { return std::min(idx_, mask_ + 1); }
 
 size_t GetStartPos(int k) const {
 return q_[(idx_ - k - 1) & mask_].first;
@@ -318,7 +316,7 @@ void ZopfliIterate(size_t num_bytes,
 int* dist_cache,
 int* last_insert_len,
 Command* commands,
-int* num_commands,
+size_t* num_commands,
 int* num_literals) {
 const Command * const orig_commands = commands;
 
@@ -334,8 +332,8 @@ void ZopfliIterate(size_t num_bytes,
 for (size_t i = 0; i + 3 < num_bytes; i++) {
 size_t cur_ix = position + i;
 size_t cur_ix_masked = cur_ix & ringbuffer_mask;
-size_t max_distance = std::min(cur_ix, max_backward_limit);
-int max_length = num_bytes - i;
+int max_distance = static_cast<int>(std::min(cur_ix, max_backward_limit));
+int max_length = static_cast<int>(num_bytes - i);
 
 queue.Push(i, nodes[i].cost - model.GetLiteralCosts(0, i));
 
@@ -344,7 +342,7 @@ void ZopfliIterate(size_t num_bytes,
 
 // Go over the command starting positions in order of increasing cost
 // difference.
-for (size_t k = 0; k < 5 && k < queue.size(); ++k) {
+for (int k = 0; k < 5 && k < queue.size(); ++k) {
 const size_t start = queue.GetStartPos(k);
 const double start_costdiff =
 nodes[start].cost - model.GetLiteralCosts(0, start);
@@ -371,12 +369,13 @@ void ZopfliIterate(size_t num_bytes,
 ringbuffer[prev_ix + best_len]) {
 continue;
 }
-const size_t len =
+const int len =
 FindMatchLengthWithLimit(&ringbuffer[prev_ix],
 &ringbuffer[cur_ix_masked],
 max_length);
 for (int l = best_len + 1; l <= len; ++l) {
-double cmd_cost = model.GetCommandCost(j, l, i - start);
+const int inslen = static_cast<int>(i - start);
+double cmd_cost = model.GetCommandCost(j, l, inslen);
 double cost = start_costdiff + cmd_cost + model.GetLiteralCosts(0, i);
 if (cost < nodes[i + l].cost) {
 UpdateZopfliNode(&nodes[0], i, start, l, l, backward, j,
@@ -409,8 +408,8 @@ void ZopfliIterate(size_t num_bytes,
 }
 for (; len <= max_len; ++len) {
 int len_code = is_dictionary_match ? match.length_code() : len;
-double cmd_cost =
-model.GetCommandCost(dist_code, len_code, i - start);
+const int inslen = static_cast<int>(i - start);
+double cmd_cost = model.GetCommandCost(dist_code, len_code, inslen);
 double cost = start_costdiff + cmd_cost + model.GetLiteralCosts(0, i);
 if (cost < nodes[i + len].cost) {
 UpdateZopfliNode(&nodes[0], i, start, len, len_code, dist,
@@ -457,7 +456,8 @@ void ZopfliIterate(size_t num_bytes,
 }
 int distance = next.distance;
 int len_code = next.length_code;
-size_t max_distance = std::min(position + pos, max_backward_limit);
+int max_distance =
+static_cast<int>(std::min(position + pos, max_backward_limit));
 bool is_dictionary = (distance > max_distance);
 int dist_code = next.distance_code;
 
@@ -475,7 +475,7 @@ void ZopfliIterate(size_t num_bytes,
 insert_length = 0;
 pos += copy_length;
 }
-*last_insert_len += num_bytes - pos;
+*last_insert_len += static_cast<int>(num_bytes - pos);
 *num_commands += (commands - orig_commands);
 }
 
@@ -490,18 +490,18 @@ void CreateBackwardReferences(size_t num_bytes,
 int* dist_cache,
 int* last_insert_len,
 Command* commands,
-int* num_commands,
+size_t* num_commands,
 int* num_literals) {
 if (num_bytes >= 3 && position >= 3) {
 // Prepare the hashes for three last bytes of the last write.
 // These could not be calculated before, since they require knowledge
 // of both the previous and the current block.
 hasher->Store(&ringbuffer[(position - 3) & ringbuffer_mask],
-position - 3);
+static_cast<uint32_t>(position - 3));
 hasher->Store(&ringbuffer[(position - 2) & ringbuffer_mask],
-position - 2);
+static_cast<uint32_t>(position - 2));
 hasher->Store(&ringbuffer[(position - 1) & ringbuffer_mask],
-position - 1);
+static_cast<uint32_t>(position - 1));
 }
 const Command * const orig_commands = commands;
 int insert_length = *last_insert_len;
@@ -510,22 +510,23 @@ void CreateBackwardReferences(size_t num_bytes,
 const size_t i_end = i + num_bytes;
 
 // For speed up heuristics for random data.
-const int random_heuristics_window_size = quality < 9 ? 64 : 512;
-int apply_random_heuristics = i + random_heuristics_window_size;
+const size_t random_heuristics_window_size = quality < 9 ? 64 : 512;
+size_t apply_random_heuristics = i + random_heuristics_window_size;
 
 // Minimum score to accept a backward reference.
 const int kMinScore = 4.0;
 
 while (i + Hasher::kHashTypeLength - 1 < i_end) {
-int max_length = i_end - i;
-size_t max_distance = std::min(i + i_diff, max_backward_limit);
+int max_length = static_cast<int>(i_end - i);
+int max_distance =
+static_cast<int>(std::min(i + i_diff, max_backward_limit));
 int best_len = 0;
 int best_len_code = 0;
 int best_dist = 0;
 double best_score = kMinScore;
 bool match_found = hasher->FindLongestMatch(
 ringbuffer, ringbuffer_mask,
-dist_cache, i + i_diff, max_length, max_distance,
+dist_cache, static_cast<uint32_t>(i + i_diff), max_length, max_distance,
 &best_len, &best_len_code, &best_dist, &best_score);
 if (match_found) {
 // Found a match. Let's look for something even better ahead.
@@ -536,11 +537,13 @@ void CreateBackwardReferences(size_t num_bytes,
 int best_len_code_2 = 0;
 int best_dist_2 = 0;
 double best_score_2 = kMinScore;
-max_distance = std::min(i + i_diff + 1, max_backward_limit);
-hasher->Store(ringbuffer + i, i + i_diff);
+max_distance =
+static_cast<int>(std::min(i + i_diff + 1, max_backward_limit));
+hasher->Store(ringbuffer + i, static_cast<uint32_t>(i + i_diff));
 match_found = hasher->FindLongestMatch(
 ringbuffer, ringbuffer_mask,
-dist_cache, i + i_diff + 1, max_length, max_distance,
+dist_cache, static_cast<uint32_t>(i + i_diff + 1),
+max_length, max_distance,
 &best_len_2, &best_len_code_2, &best_dist_2, &best_score_2);
 double cost_diff_lazy = 7.0;
 if (match_found && best_score_2 >= best_score + cost_diff_lazy) {
@@ -560,7 +563,7 @@ void CreateBackwardReferences(size_t num_bytes,
 }
 apply_random_heuristics =
 i + 2 * best_len + random_heuristics_window_size;
-max_distance = std::min(i + i_diff, max_backward_limit);
+max_distance = static_cast<int>(std::min(i + i_diff, max_backward_limit));
 // The first 16 codes are special shortcodes, and the minimum offset is 1.
 int distance_code =
 ComputeDistanceCode(best_dist, max_distance, quality, dist_cache);
@@ -577,12 +580,13 @@ void CreateBackwardReferences(size_t num_bytes,
 // Put the hash keys into the table, if there are enough
 // bytes left.
 for (int j = 1; j < best_len; ++j) {
-hasher->Store(&ringbuffer[i + j], i + i_diff + j);
+hasher->Store(&ringbuffer[i + j],
+static_cast<uint32_t>(i + i_diff + j));
 }
 i += best_len;
 } else {
 ++insert_length;
-hasher->Store(ringbuffer + i, i + i_diff);
+hasher->Store(ringbuffer + i, static_cast<uint32_t>(i + i_diff));
 ++i;
 // If we have not seen matches for a long time, we can skip some
 // match lookups. Unsuccessful match lookups are very very expensive
@@ -597,24 +601,24 @@ void CreateBackwardReferences(size_t num_bytes,
 // turn out to be useful in the future, too, so we store less of
 // them to not to flood out the hash table of good compressible
 // data.
-int i_jump = std::min(i + 16, i_end - 4);
+size_t i_jump = std::min(i + 16, i_end - 4);
 for (; i < i_jump; i += 4) {
-hasher->Store(ringbuffer + i, i + i_diff);
+hasher->Store(ringbuffer + i, static_cast<uint32_t>(i + i_diff));
 insert_length += 4;
 }
 } else {
-int i_jump = std::min(i + 8, i_end - 3);
+size_t i_jump = std::min(i + 8, i_end - 3);
 for (; i < i_jump; i += 2) {
-hasher->Store(ringbuffer + i, i + i_diff);
+hasher->Store(ringbuffer + i, static_cast<uint32_t>(i + i_diff));
 insert_length += 2;
 }
 }
 }
 }
 }
 }
-insert_length += (i_end - i);
+insert_length += static_cast<int>(i_end - i);
 *last_insert_len = insert_length;
-*num_commands += (commands - orig_commands);
+*num_commands += commands - orig_commands;
 }
 
 void CreateBackwardReferences(size_t num_bytes,
@@ -628,7 +632,7 @@ void CreateBackwardReferences(size_t num_bytes,
 int* dist_cache,
 int* last_insert_len,
 Command* commands,
-int* num_commands,
+size_t* num_commands,
 int* num_literals) {
 bool zopflify = quality > 9;
 if (zopflify) {
@@ -638,36 +642,37 @@ void CreateBackwardReferences(size_t num_bytes,
 // These could not be calculated before, since they require knowledge
 // of both the previous and the current block.
 hasher->Store(&ringbuffer[(position - 3) & ringbuffer_mask],
-position - 3);
+static_cast<uint32_t>(position - 3));
 hasher->Store(&ringbuffer[(position - 2) & ringbuffer_mask],
-position - 2);
+static_cast<uint32_t>(position - 2));
 hasher->Store(&ringbuffer[(position - 1) & ringbuffer_mask],
-position - 1);
+static_cast<uint32_t>(position - 1));
 }
 std::vector<int> num_matches(num_bytes);
 std::vector<BackwardMatch> matches(3 * num_bytes);
 size_t cur_match_pos = 0;
 for (size_t i = 0; i + 3 < num_bytes; ++i) {
-size_t max_distance = std::min(position + i, max_backward_limit);
-int max_length = num_bytes - i;
+int max_distance =
+static_cast<int>(std::min(position + i, max_backward_limit));
+int max_length = static_cast<int>(num_bytes - i);
 // Ensure that we have at least kMaxZopfliLen free slots.
 if (matches.size() < cur_match_pos + kMaxZopfliLen) {
 matches.resize(cur_match_pos + kMaxZopfliLen);
 }
 hasher->FindAllMatches(
 ringbuffer, ringbuffer_mask,
-position + i, max_length, max_distance,
+static_cast<uint32_t>(position + i), max_length, max_distance,
 &num_matches[i], &matches[cur_match_pos]);
 hasher->Store(&ringbuffer[(position + i) & ringbuffer_mask],
-position + i);
+static_cast<uint32_t>(position + i));
 cur_match_pos += num_matches[i];
 if (num_matches[i] == 1) {
 const int match_len = matches[cur_match_pos - 1].length();
 if (match_len > kMaxZopfliLen) {
 for (int j = 1; j < match_len; ++j) {
 ++i;
-hasher->Store(
-&ringbuffer[(position + i) & ringbuffer_mask], position + i);
+hasher->Store(&ringbuffer[(position + i) & ringbuffer_mask],
+static_cast<uint32_t>(position + i));
 num_matches[i] = 0;
 }
 }
@@ -678,7 +683,7 @@ void CreateBackwardReferences(size_t num_bytes,
 int orig_dist_cache[4] = {
 dist_cache[0], dist_cache[1], dist_cache[2], dist_cache[3]
 };
-int orig_num_commands = *num_commands;
+size_t orig_num_commands = *num_commands;
 static const int kIterations = 2;
 for (int i = 0; i < kIterations; i++) {
 ZopfliCostModel model;

@@ -40,7 +40,7 @@ void CreateBackwardReferences(size_t num_bytes,
 int* dist_cache,
 int* last_insert_len,
 Command* commands,
-int* num_commands,
+size_t* num_commands,
 int* num_literals);
 
 } // namespace brotli
@@ -16,6 +16,7 @@
 
 #include "./block_splitter.h"
 
+#include <assert.h>
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -53,7 +54,7 @@ void CopyLiteralsToByteArray(const Command* cmds,
 std::vector<uint8_t>* literals) {
 // Count how many we have.
 size_t total_length = 0;
-for (int i = 0; i < num_commands; ++i) {
+for (size_t i = 0; i < num_commands; ++i) {
 total_length += cmds[i].insert_len_;
 }
 if (total_length == 0) {
@@ -66,7 +67,7 @@ void CopyLiteralsToByteArray(const Command* cmds,
 // Loop again, and copy this time.
 size_t pos = 0;
 size_t from_pos = offset & mask;
-for (int i = 0; i < num_commands && pos < total_length; ++i) {
+for (size_t i = 0; i < num_commands && pos < total_length; ++i) {
 size_t insert_len = cmds[i].insert_len_;
 if (from_pos + insert_len > mask) {
 size_t head_size = mask + 1 - from_pos;
@@ -87,7 +88,7 @@ void CopyCommandsToByteArray(const Command* cmds,
 const size_t num_commands,
 std::vector<uint16_t>* insert_and_copy_codes,
 std::vector<uint16_t>* distance_prefixes) {
-for (int i = 0; i < num_commands; ++i) {
+for (size_t i = 0; i < num_commands; ++i) {
 const Command& cmd = cmds[i];
 insert_and_copy_codes->push_back(cmd.cmd_prefix_);
 if (cmd.copy_len_ > 0 && cmd.cmd_prefix_ >= 128) {
@@ -110,14 +111,14 @@ void InitialEntropyCodes(const DataType* data, size_t length,
 int max_histograms,
 size_t stride,
 std::vector<HistogramType>* vec) {
-int total_histograms = length / literals_per_histogram + 1;
+int total_histograms = static_cast<int>(length) / literals_per_histogram + 1;
 if (total_histograms > max_histograms) {
 total_histograms = max_histograms;
 }
 unsigned int seed = 7;
-int block_length = length / total_histograms;
+size_t block_length = length / total_histograms;
 for (int i = 0; i < total_histograms; ++i) {
-int pos = length * i / total_histograms;
+size_t pos = length * i / total_histograms;
 if (i != 0) {
 pos += MyRand(&seed) % block_length;
 }
@@ -150,19 +151,19 @@ template<typename HistogramType, typename DataType>
 void RefineEntropyCodes(const DataType* data, size_t length,
 size_t stride,
 std::vector<HistogramType>* vec) {
-int iters =
+size_t iters =
 kIterMulForRefining * length / stride + kMinItersForRefining;
 unsigned int seed = 7;
 iters = ((iters + vec->size() - 1) / vec->size()) * vec->size();
-for (int iter = 0; iter < iters; ++iter) {
+for (size_t iter = 0; iter < iters; ++iter) {
 HistogramType sample;
 RandomSample(&seed, data, length, stride, &sample);
-int ix = iter % vec->size();
+size_t ix = iter % vec->size();
 (*vec)[ix].AddHistogram(sample);
 }
 }
 
-inline static float BitCost(int count) {
+inline static double BitCost(int count) {
 return count == 0 ? -2 : FastLog2(count);
 }
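The BitCost change just above is a return-type fix rather than a behavioral one: FastLog2 evidently returns double, so the old float return type forced an implicit double-to-float narrowing on every call, which -Wconversion flags; widening the return type removes the conversion instead of casting it away.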
 
@@ -172,12 +173,13 @@ void FindBlocks(const DataType* data, const size_t length,
 const std::vector<Histogram<kSize> > &vec,
 uint8_t *block_id) {
 if (vec.size() <= 1) {
-for (int i = 0; i < length; ++i) {
+for (size_t i = 0; i < length; ++i) {
 block_id[i] = 0;
 }
 return;
 }
-int vecsize = vec.size();
+int vecsize = static_cast<int>(vec.size());
+assert(vecsize <= 256);
 double* insert_cost = new double[kSize * vecsize];
 memset(insert_cost, 0, sizeof(insert_cost[0]) * kSize * vecsize);
 for (int j = 0; j < vecsize; ++j) {
@@ -199,7 +201,7 @@ void FindBlocks(const DataType* data, const size_t length,
 // reaches block switch cost, it means that when we trace back from the last
 // position, we need to switch here.
 for (size_t byte_ix = 0; byte_ix < length; ++byte_ix) {
-int ix = byte_ix * vecsize;
+size_t ix = byte_ix * vecsize;
 int insert_cost_ix = data[byte_ix] * vecsize;
 double min_cost = 1e99;
 for (int k = 0; k < vecsize; ++k) {
@@ -207,7 +209,7 @@ void FindBlocks(const DataType* data, const size_t length,
 cost[k] += insert_cost[insert_cost_ix + k];
 if (cost[k] < min_cost) {
 min_cost = cost[k];
-block_id[byte_ix] = k;
+block_id[byte_ix] = static_cast<uint8_t>(k);
 }
 }
 double block_switch_cost = block_switch_bitcost;
@@ -224,9 +226,9 @@ void FindBlocks(const DataType* data, const size_t length,
 }
 }
 // Now trace back from the last position and switch at the marked places.
-int byte_ix = length - 1;
-int ix = byte_ix * vecsize;
-int cur_id = block_id[byte_ix];
+size_t byte_ix = length - 1;
+size_t ix = byte_ix * vecsize;
+uint8_t cur_id = block_id[byte_ix];
 while (byte_ix > 0) {
 --byte_ix;
 ix -= vecsize;
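The FindBlocks code above is a small Viterbi-style dynamic program: per-byte costs accumulate for every block type, switching types costs a fixed penalty, and the final assignment is recovered by walking backwards from the last position, as the comment describes. A self-contained sketch of the same idea (a simplified, hypothetical model with an assumed cost matrix — not the brotli code):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// cost[i][k] is the assumed bit cost of coding byte i with histogram k;
// switch_cost is the assumed penalty for changing block types.
std::vector<uint8_t> AssignBlockTypes(
    const std::vector<std::vector<double> >& cost, double switch_cost) {
  const size_t n = cost.size();
  const size_t types = cost[0].size();
  std::vector<double> best(types, 0.0);
  // from[i][k]: the type in effect before byte i, given byte i uses type k.
  std::vector<std::vector<uint8_t> > from(n, std::vector<uint8_t>(types));
  for (size_t i = 0; i < n; ++i) {
    double min_prev = best[0];
    uint8_t arg = 0;
    for (size_t k = 1; k < types; ++k) {
      if (best[k] < min_prev) { min_prev = best[k]; arg = static_cast<uint8_t>(k); }
    }
    for (size_t k = 0; k < types; ++k) {
      const double stay = best[k];
      const double switched = min_prev + switch_cost;
      from[i][k] = switched < stay ? arg : static_cast<uint8_t>(k);
      best[k] = std::min(stay, switched) + cost[i][k];
    }
  }
  // Trace back from the cheapest final type and switch at the marked places.
  uint8_t cur = 0;
  for (size_t k = 1; k < types; ++k) {
    if (best[k] < best[cur]) cur = static_cast<uint8_t>(k);
  }
  std::vector<uint8_t> ids(n);
  for (size_t i = n; i-- > 0;) { ids[i] = cur; cur = from[i][cur]; }
  return ids;
}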
@@ -243,13 +245,13 @@ void FindBlocks(const DataType* data, const size_t length,
 int RemapBlockIds(uint8_t* block_ids, const size_t length) {
 std::map<uint8_t, uint8_t> new_id;
 int next_id = 0;
-for (int i = 0; i < length; ++i) {
+for (size_t i = 0; i < length; ++i) {
 if (new_id.find(block_ids[i]) == new_id.end()) {
-new_id[block_ids[i]] = next_id;
+new_id[block_ids[i]] = static_cast<uint8_t>(next_id);
 ++next_id;
 }
 }
-for (int i = 0; i < length; ++i) {
+for (size_t i = 0; i < length; ++i) {
 block_ids[i] = new_id[block_ids[i]];
 }
 return next_id;
@@ -260,9 +262,10 @@ void BuildBlockHistograms(const DataType* data, const size_t length,
 uint8_t* block_ids,
 std::vector<HistogramType>* histograms) {
 int num_types = RemapBlockIds(block_ids, length);
+assert(num_types <= 256);
 histograms->clear();
 histograms->resize(num_types);
-for (int i = 0; i < length; ++i) {
+for (size_t i = 0; i < length; ++i) {
 (*histograms)[block_ids[i]].Add(data[i]);
 }
 }
@@ -274,7 +277,7 @@ void ClusterBlocks(const DataType* data, const size_t length,
 std::vector<int> block_index(length);
 int cur_idx = 0;
 HistogramType cur_histogram;
-for (int i = 0; i < length; ++i) {
+for (size_t i = 0; i < length; ++i) {
 bool block_boundary = (i + 1 == length || block_ids[i] != block_ids[i + 1]);
 block_index[i] = cur_idx;
 cur_histogram.Add(data[i]);
@@ -288,12 +291,12 @@ void ClusterBlocks(const DataType* data, const size_t length,
 std::vector<int> histogram_symbols;
 // Block ids need to fit in one byte.
 static const int kMaxNumberOfBlockTypes = 256;
-ClusterHistograms(histograms, 1, histograms.size(),
+ClusterHistograms(histograms, 1, static_cast<int>(histograms.size()),
 kMaxNumberOfBlockTypes,
 &clustered_histograms,
 &histogram_symbols);
 for (int i = 0; i < length; ++i) {
-block_ids[i] = histogram_symbols[block_index[i]];
+block_ids[i] = static_cast<uint8_t>(histogram_symbols[block_index[i]]);
 }
 }
 
@@ -301,7 +304,7 @@ void BuildBlockSplit(const std::vector<uint8_t>& block_ids, BlockSplit* split) {
 int cur_id = block_ids[0];
 int cur_length = 1;
 split->num_types = -1;
-for (int i = 1; i < block_ids.size(); ++i) {
+for (size_t i = 1; i < block_ids.size(); ++i) {
 if (block_ids[i] != cur_id) {
 split->types.push_back(cur_id);
 split->lengths.push_back(cur_length);
@@ -330,7 +333,7 @@ void SplitByteVector(const std::vector<DataType>& data,
 } else if (data.size() < kMinLengthForBlockSplitting) {
 split->num_types = 1;
 split->types.push_back(0);
-split->lengths.push_back(data.size());
+split->lengths.push_back(static_cast<int>(data.size()));
 return;
 }
 std::vector<HistogramType> histograms;
@@ -401,7 +404,7 @@ void SplitBlockByTotalLength(const Command* all_commands,
 int length_limit = input_size / num_blocks + 1;
 int total_length = 0;
 std::vector<Command> cur_block;
-for (int i = 0; i < num_commands; ++i) {
+for (size_t i = 0; i < num_commands; ++i) {
 const Command& cmd = all_commands[i];
 int cmd_length = cmd.insert_len_ + cmd.copy_len_;
 if (total_length > length_limit) {

@@ -33,13 +33,16 @@ namespace brotli {
 // returns false if fail
 // nibblesbits represents the 2 bits to encode MNIBBLES (0-3)
 bool EncodeMlen(size_t length, int* bits, int* numbits, int* nibblesbits) {
+if (length > (1 << 24)) {
+return false;
+}
 length--; // MLEN - 1 is encoded
-int lg = length == 0 ? 1 : Log2Floor(length) + 1;
-if (lg > 24) return false;
+int lg = length == 0 ? 1 : Log2Floor(static_cast<uint32_t>(length)) + 1;
+assert(lg <= 24);
 int mnibbles = (lg < 16 ? 16 : (lg + 3)) / 4;
 *nibblesbits = mnibbles - 4;
 *numbits = mnibbles * 4;
-*bits = length;
+*bits = static_cast<int>(length);
 return true;
 }
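A worked example of the MNIBBLES arithmetic above (my numbers, following the code): for length = 70000, MLEN - 1 = 69999 occupies lg = 17 bits, so mnibbles = (17 + 3) / 4 = 5, giving *numbits = 20 and *nibblesbits = 1. Any lg <= 16 yields the minimum mnibbles = 4 (*nibblesbits = 0), and the 24-bit cap keeps mnibbles at most 6, which is exactly what lets MNIBBLES - 4 fit in the 2 bits mentioned in the comment.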
 
@@ -165,7 +168,7 @@ void StoreHuffmanTreeToBitMask(
 const std::vector<uint16_t> &code_length_bitdepth_symbols,
 int * __restrict storage_ix,
 uint8_t * __restrict storage) {
-for (int i = 0; i < huffman_tree.size(); ++i) {
+for (size_t i = 0; i < huffman_tree.size(); ++i) {
 int ix = huffman_tree[i];
 WriteBits(code_length_bitdepth[ix], code_length_bitdepth_symbols[ix],
 storage_ix, storage);
@@ -218,7 +221,7 @@ void StoreSimpleHuffmanTree(const uint8_t* depths,
 
 // num = alphabet size
 // depths = symbol depths
-void StoreHuffmanTree(const uint8_t* depths, size_t num,
+void StoreHuffmanTree(const uint8_t* depths, int num,
 int *storage_ix, uint8_t *storage) {
 // Write the Huffman tree into the brotli-representation.
 std::vector<uint8_t> huffman_tree;
@@ -230,7 +233,7 @@ void StoreHuffmanTree(const uint8_t* depths, size_t num,
 
 // Calculate the statistics of the Huffman tree in brotli-representation.
 int huffman_tree_histogram[kCodeLengthCodes] = { 0 };
-for (int i = 0; i < huffman_tree.size(); ++i) {
+for (size_t i = 0; i < huffman_tree.size(); ++i) {
 ++huffman_tree_histogram[huffman_tree[i]];
 }
 
@@ -283,7 +286,7 @@ void BuildAndStoreHuffmanTree(const int *histogram,
 uint8_t* storage) {
 int count = 0;
 int s4[4] = { 0 };
-for (size_t i = 0; i < length; i++) {
+for (int i = 0; i < length; i++) {
 if (histogram[i]) {
 if (count < 4) {
 s4[count] = i;
@@ -318,7 +321,7 @@ void BuildAndStoreHuffmanTree(const int *histogram,
 }
 
 int IndexOf(const std::vector<int>& v, int value) {
-for (int i = 0; i < v.size(); ++i) {
+for (int i = 0; i < static_cast<int>(v.size()); ++i) {
 if (v[i] == value) return i;
 }
 return -1;
@@ -335,9 +338,9 @@ void MoveToFront(std::vector<int>* v, int index) {
 std::vector<int> MoveToFrontTransform(const std::vector<int>& v) {
 if (v.empty()) return v;
 std::vector<int> mtf(*std::max_element(v.begin(), v.end()) + 1);
-for (int i = 0; i < mtf.size(); ++i) mtf[i] = i;
+for (int i = 0; i < static_cast<int>(mtf.size()); ++i) mtf[i] = i;
 std::vector<int> result(v.size());
-for (int i = 0; i < v.size(); ++i) {
+for (size_t i = 0; i < v.size(); ++i) {
 int index = IndexOf(mtf, v[i]);
 assert(index >= 0);
 result[i] = index;
@@ -357,7 +360,7 @@ void RunLengthCodeZeros(const std::vector<int>& v_in,
 std::vector<int>* v_out,
 std::vector<int>* extra_bits) {
 int max_reps = 0;
-for (int i = 0; i < v_in.size();) {
+for (size_t i = 0; i < v_in.size();) {
 for (; i < v_in.size() && v_in[i] != 0; ++i) ;
 int reps = 0;
 for (; i < v_in.size() && v_in[i] == 0; ++i) {
@@ -367,14 +370,14 @@ void RunLengthCodeZeros(const std::vector<int>& v_in,
 }
 int max_prefix = max_reps > 0 ? Log2Floor(max_reps) : 0;
 *max_run_length_prefix = std::min(max_prefix, *max_run_length_prefix);
-for (int i = 0; i < v_in.size();) {
+for (size_t i = 0; i < v_in.size();) {
 if (v_in[i] != 0) {
 v_out->push_back(v_in[i] + *max_run_length_prefix);
 extra_bits->push_back(0);
 ++i;
 } else {
 int reps = 1;
-for (uint32_t k = i + 1; k < v_in.size() && v_in[k] == 0; ++k) {
+for (size_t k = i + 1; k < v_in.size() && v_in[k] == 0; ++k) {
 ++reps;
 }
 i += reps;
@@ -410,7 +413,7 @@ void EncodeContextMap(const std::vector<int>& context_map,
 RunLengthCodeZeros(transformed_symbols, &max_run_length_prefix,
 &rle_symbols, &extra_bits);
 HistogramContextMap symbol_histogram;
-for (int i = 0; i < rle_symbols.size(); ++i) {
+for (size_t i = 0; i < rle_symbols.size(); ++i) {
 symbol_histogram.Add(rle_symbols[i]);
 }
 bool use_rle = max_run_length_prefix > 0;
@@ -425,7 +428,7 @@ void EncodeContextMap(const std::vector<int>& context_map,
 num_clusters + max_run_length_prefix,
 symbol_code.depth_, symbol_code.bits_,
 storage_ix, storage);
-for (int i = 0; i < rle_symbols.size(); ++i) {
+for (size_t i = 0; i < rle_symbols.size(); ++i) {
 WriteBits(symbol_code.depth_[rle_symbols[i]],
 symbol_code.bits_[rle_symbols[i]],
 storage_ix, storage);
@@ -458,7 +461,7 @@ void BuildAndStoreBlockSplitCode(const std::vector<int>& types,
 BlockSplitCode* code,
 int* storage_ix,
 uint8_t* storage) {
-const int num_blocks = types.size();
+const int num_blocks = static_cast<int>(types.size());
 std::vector<int> type_histo(num_types + 2);
 std::vector<int> length_histo(26);
 int last_type = 1;
@@ -563,8 +566,8 @@ class BlockEncoder {
 int* storage_ix, uint8_t* storage) {
 depths_.resize(histograms.size() * alphabet_size_);
 bits_.resize(histograms.size() * alphabet_size_);
-for (int i = 0; i < histograms.size(); ++i) {
-int ix = i * alphabet_size_;
+for (size_t i = 0; i < histograms.size(); ++i) {
+size_t ix = i * alphabet_size_;
 BuildAndStoreHuffmanTree(&histograms[i].data_[0], alphabet_size_,
 &depths_[ix], &bits_[ix],
 storage_ix, storage);
@@ -675,19 +678,21 @@ bool StoreMetaBlock(const uint8_t* input,
 WriteBits(2, literal_context_mode, storage_ix, storage);
 }
 
+int num_literal_histograms = static_cast<int>(mb.literal_histograms.size());
 if (mb.literal_context_map.empty()) {
-StoreTrivialContextMap(mb.literal_histograms.size(), kLiteralContextBits,
+StoreTrivialContextMap(num_literal_histograms, kLiteralContextBits,
 storage_ix, storage);
 } else {
-EncodeContextMap(mb.literal_context_map, mb.literal_histograms.size(),
+EncodeContextMap(mb.literal_context_map, num_literal_histograms,
 storage_ix, storage);
 }
 
+int num_dist_histograms = static_cast<int>(mb.distance_histograms.size());
 if (mb.distance_context_map.empty()) {
-StoreTrivialContextMap(mb.distance_histograms.size(), kDistanceContextBits,
+StoreTrivialContextMap(num_dist_histograms, kDistanceContextBits,
 storage_ix, storage);
 } else {
-EncodeContextMap(mb.distance_context_map, mb.distance_histograms.size(),
+EncodeContextMap(mb.distance_context_map, num_dist_histograms,
 storage_ix, storage);
 }
 
@@ -699,11 +704,11 @@ bool StoreMetaBlock(const uint8_t* input,
 storage_ix, storage);
 
 size_t pos = start_pos;
-for (int i = 0; i < n_commands; ++i) {
+for (size_t i = 0; i < n_commands; ++i) {
 const Command cmd = commands[i];
 int cmd_code = cmd.cmd_prefix_;
-int lennumextra = cmd.cmd_extra_ >> 48;
-uint64_t lenextra = cmd.cmd_extra_ & 0xffffffffffffULL;
+int lennumextra = static_cast<int>(cmd.cmd_extra_ >> 48);
+uint64_t lenextra = cmd.cmd_extra_ & 0xffffffffffffUL;
 command_enc.StoreSymbol(cmd_code, storage_ix, storage);
 WriteBits(lennumextra, lenextra, storage_ix, storage);
 if (mb.literal_context_map.empty()) {
@@ -715,7 +720,7 @@ bool StoreMetaBlock(const uint8_t* input,
 for (int j = 0; j < cmd.insert_len_; ++j) {
 int context = Context(prev_byte, prev_byte2,
 literal_context_mode);
-int literal = input[pos & mask];
+uint8_t literal = input[pos & mask];
 literal_enc.StoreSymbolWithContext<kLiteralContextBits>(
 literal, context, mb.literal_context_map, storage_ix, storage);
 prev_byte2 = prev_byte;
@@ -772,7 +777,7 @@ bool StoreMetaBlockTrivial(const uint8_t* input,
 HistogramDistance dist_histo;
 
 size_t pos = start_pos;
-for (int i = 0; i < n_commands; ++i) {
+for (size_t i = 0; i < n_commands; ++i) {
 const Command cmd = commands[i];
 cmd_histo.Add(cmd.cmd_prefix_);
 for (int j = 0; j < cmd.insert_len_; ++j) {
@@ -805,11 +810,11 @@ bool StoreMetaBlockTrivial(const uint8_t* input,
 storage_ix, storage);
 
 pos = start_pos;
-for (int i = 0; i < n_commands; ++i) {
+for (size_t i = 0; i < n_commands; ++i) {
 const Command cmd = commands[i];
 const int cmd_code = cmd.cmd_prefix_;
-const int lennumextra = cmd.cmd_extra_ >> 48;
-const uint64_t lenextra = cmd.cmd_extra_ & 0xffffffffffffULL;
+const int lennumextra = static_cast<int>(cmd.cmd_extra_ >> 48);
+const uint64_t lenextra = cmd.cmd_extra_ & 0xffffffffffffUL;
 WriteBits(cmd_depth[cmd_code], cmd_bits[cmd_code], storage_ix, storage);
 WriteBits(lennumextra, lenextra, storage_ix, storage);
 for (int j = 0; j < cmd.insert_len_; j++) {
@@ -850,12 +855,12 @@ bool StoreUncompressedMetaBlock(bool final_block,
 if (masked_pos + len > mask + 1) {
 size_t len1 = mask + 1 - masked_pos;
 memcpy(&storage[*storage_ix >> 3], &input[masked_pos], len1);
-*storage_ix += len1 << 3;
+*storage_ix += static_cast<int>(len1 << 3);
 len -= len1;
 masked_pos = 0;
 }
 memcpy(&storage[*storage_ix >> 3], &input[masked_pos], len);
-*storage_ix += len << 3;
+*storage_ix += static_cast<int>(len << 3);
 
 // We need to clear the next 4 bytes to continue to be
 // compatible with WriteBits.

@@ -32,60 +32,63 @@ static int copybase[] = { 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 18, 22, 30, 38,
 static int copyextra[] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4,
 4, 5, 5, 6, 7, 8, 9, 10, 24 };
 
-static inline int GetInsertLengthCode(int insertlen) {
+static inline uint16_t GetInsertLengthCode(int insertlen) {
 if (insertlen < 6) {
-return insertlen;
+return static_cast<uint16_t>(insertlen);
 } else if (insertlen < 130) {
 insertlen -= 2;
 int nbits = Log2FloorNonZero(insertlen) - 1;
-return (nbits << 1) + (insertlen >> nbits) + 2;
+return static_cast<uint16_t>((nbits << 1) + (insertlen >> nbits) + 2);
 } else if (insertlen < 2114) {
-return Log2FloorNonZero(insertlen - 66) + 10;
+return static_cast<uint16_t>(Log2FloorNonZero(insertlen - 66) + 10);
 } else if (insertlen < 6210) {
-return 21;
+return 21u;
 } else if (insertlen < 22594) {
-return 22;
+return 22u;
 } else {
-return 23;
+return 23u;
 }
 }
 
-static inline int GetCopyLengthCode(int copylen) {
+static inline uint16_t GetCopyLengthCode(int copylen) {
 if (copylen < 10) {
-return copylen - 2;
+return static_cast<uint16_t>(copylen - 2);
 } else if (copylen < 134) {
 copylen -= 6;
 int nbits = Log2FloorNonZero(copylen) - 1;
-return (nbits << 1) + (copylen >> nbits) + 4;
+return static_cast<uint16_t>((nbits << 1) + (copylen >> nbits) + 4);
 } else if (copylen < 2118) {
-return Log2FloorNonZero(copylen - 70) + 12;
+return static_cast<uint16_t>(Log2FloorNonZero(copylen - 70) + 12);
 } else {
-return 23;
+return 23u;
 }
 }
 
-static inline int CombineLengthCodes(
-int inscode, int copycode, int distancecode) {
-int bits64 = (copycode & 0x7u) | ((inscode & 0x7u) << 3);
-if (distancecode == 0 && inscode < 8 && copycode < 16) {
+static inline uint16_t CombineLengthCodes(
+uint16_t inscode, uint16_t copycode, bool use_last_distance) {
+uint16_t bits64 =
+static_cast<uint16_t>((copycode & 0x7u) | ((inscode & 0x7u) << 3));
+if (use_last_distance && inscode < 8 && copycode < 16) {
 return (copycode < 8) ? bits64 : (bits64 | 64);
 } else {
 // "To convert an insert-and-copy length code to an insert length code and
 // a copy length code, the following table can be used"
-static const int cells[9] = { 2, 3, 6, 4, 5, 8, 7, 9, 10 };
-return (cells[(copycode >> 3) + 3 * (inscode >> 3)] << 6) | bits64;
+static const uint16_t cells[9] = { 128u, 192u, 384u, 256u, 320u, 512u,
+448u, 576u, 640u };
+return cells[(copycode >> 3) + 3 * (inscode >> 3)] | bits64;
 }
 }
 
-static inline void GetLengthCode(int insertlen, int copylen, int distancecode,
+static inline void GetLengthCode(int insertlen, int copylen,
+bool use_last_distance,
 uint16_t* code, uint64_t* extra) {
-int inscode = GetInsertLengthCode(insertlen);
-int copycode = GetCopyLengthCode(copylen);
+uint16_t inscode = GetInsertLengthCode(insertlen);
+uint16_t copycode = GetCopyLengthCode(copylen);
 uint64_t insnumextra = insextra[inscode];
 uint64_t numextra = insnumextra + copyextra[copycode];
 uint64_t insextraval = insertlen - insbase[inscode];
 uint64_t copyextraval = copylen - copybase[copycode];
-*code = CombineLengthCodes(inscode, copycode, distancecode);
+*code = CombineLengthCodes(inscode, copycode, use_last_distance);
 *extra = (numextra << 48) | (copyextraval << insnumextra) | insextraval;
 }
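The new cells table is the old one with the << 6 shift folded into the constants (2 << 6 = 128, 3 << 6 = 192, 6 << 6 = 384, and so on), so the emitted insert-and-copy codes are unchanged; only the arithmetic now stays within uint16_t. A throwaway check of that equivalence (illustrative, not part of the commit):

#include <cassert>
#include <cstdint>

int main() {
  static const int old_cells[9] = { 2, 3, 6, 4, 5, 8, 7, 9, 10 };
  static const uint16_t new_cells[9] = { 128u, 192u, 384u, 256u, 320u, 512u,
                                         448u, 576u, 640u };
  for (int i = 0; i < 9; ++i) {
    assert((old_cells[i] << 6) == new_cells[i]);  // shift folded into constant
  }
  return 0;
}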
 
@@ -97,13 +100,13 @@ struct Command {
 // npostfix and ndirect were 0, they are only recomputed later after the
 // clustering if needed.
 PrefixEncodeCopyDistance(distance_code, 0, 0, &dist_prefix_, &dist_extra_);
-GetLengthCode(insertlen, copylen_code, dist_prefix_,
+GetLengthCode(insertlen, copylen_code, dist_prefix_ == 0,
 &cmd_prefix_, &cmd_extra_);
 }
 
 Command(int insertlen)
 : insert_len_(insertlen), copy_len_(0), dist_prefix_(16), dist_extra_(0) {
-GetLengthCode(insertlen, 4, dist_prefix_, &cmd_prefix_, &cmd_extra_);
+GetLengthCode(insertlen, 4, dist_prefix_ == 0, &cmd_prefix_, &cmd_extra_);
 }
 
 int DistanceCode() const {

@@ -170,11 +170,12 @@ static inline uint8_t Context(uint8_t p1, uint8_t p2, int mode) {
 case CONTEXT_LSB6:
 return p1 & 0x3f;
 case CONTEXT_MSB6:
-return p1 >> 2;
+return static_cast<uint8_t>(p1 >> 2);
 case CONTEXT_UTF8:
 return kUTF8ContextLookup[p1] | kUTF8ContextLookup[p2 + 256];
 case CONTEXT_SIGNED:
-return (kSigned3BitContextLookup[p1] << 3) + kSigned3BitContextLookup[p2];
+return static_cast<uint8_t>((kSigned3BitContextLookup[p1] << 3) +
+kSigned3BitContextLookup[p2]);
 default:
 return 0;
 }

@@ -51,7 +51,7 @@ void RecomputeDistancePrefixes(Command* cmds,
 if (num_direct_distance_codes == 0 && distance_postfix_bits == 0) {
 return;
 }
-for (int i = 0; i < num_commands; ++i) {
+for (size_t i = 0; i < num_commands; ++i) {
 Command* cmd = &cmds[i];
 if (cmd->copy_len_ > 0 && cmd->cmd_prefix_ >= 128) {
 PrefixEncodeCopyDistance(cmd->DistanceCode(),
@@ -124,10 +124,10 @@ BrotliCompressor::BrotliCompressor(BrotliParams params)
 last_byte_ = 1;
 last_byte_bits_ = 7;
 } else if (params_.lgwin > 17) {
-last_byte_ = ((params_.lgwin - 17) << 1) | 1;
+last_byte_ = static_cast<uint8_t>(((params_.lgwin - 17) << 1) | 1);
 last_byte_bits_ = 4;
 } else {
-last_byte_ = ((params_.lgwin - 8) << 4) | 1;
+last_byte_ = static_cast<uint8_t>(((params_.lgwin - 8) << 4) | 1);
 last_byte_bits_ = 7;
 }
 
@@ -251,7 +251,7 @@ bool BrotliCompressor::WriteBrotliData(const bool is_last,
 &num_commands_,
 &num_literals_);
 
-int max_length = std::min<int>(mask + 1, 1 << kMaxInputBlockBits);
+size_t max_length = std::min<size_t>(mask + 1, 1u << kMaxInputBlockBits);
 if (!is_last && !force_flush &&
 (params_.quality >= kMinQualityForBlockSplit ||
 (num_literals_ + num_commands_ < kMaxNumDelayedSymbols)) &&
@@ -384,11 +384,12 @@ bool BrotliCompressor::WriteMetaBlockInternal(const bool is_last,
 
 bool uncompressed = false;
 if (num_commands_ < (bytes >> 8) + 2) {
-if (num_literals_ > 0.99 * bytes) {
+if (num_literals_ > 0.99 * static_cast<double>(bytes)) {
 int literal_histo[256] = { 0 };
 static const int kSampleRate = 13;
 static const double kMinEntropy = 7.92;
-const double bit_cost_threshold = bytes * kMinEntropy / kSampleRate;
+const double bit_cost_threshold =
+static_cast<double>(bytes) * kMinEntropy / kSampleRate;
 for (size_t i = last_flush_pos_; i < input_pos_; i += kSampleRate) {
 ++literal_histo[data[i & mask]];
 }
@@ -483,7 +484,7 @@ bool BrotliCompressor::WriteMetaBlockInternal(const bool is_last,
 return false;
 }
 }
-if (bytes + 4 < (storage_ix >> 3)) {
+if (bytes + 4 < static_cast<size_t>(storage_ix >> 3)) {
 // Restore the distance cache and last byte.
 memcpy(dist_cache_, saved_dist_cache_, sizeof(dist_cache_));
 storage[0] = last_byte_;
@@ -545,9 +546,11 @@ bool BrotliCompressor::WriteMetadata(const size_t input_size,
 if (input_size == 0) {
 WriteBits(2, 0, &storage_ix, encoded_buffer);
 *encoded_size = (storage_ix + 7) >> 3;
+} else if (input_size > (1 << 24)) {
+return false;
 } else {
-size_t nbits = Log2Floor(input_size - 1) + 1;
-size_t nbytes = (nbits + 7) / 8;
+int nbits = Log2Floor(static_cast<uint32_t>(input_size) - 1) + 1;
+int nbytes = (nbits + 7) / 8;
 WriteBits(2, nbytes, &storage_ix, encoded_buffer);
 WriteBits(8 * nbytes, input_size - 1, &storage_ix, encoded_buffer);
 size_t hdr_size = (storage_ix + 7) >> 3;
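For concreteness, a worked example of the metadata header math above (my numbers): input_size = 300 gives Log2Floor(299) = 8, hence nbits = 9 and nbytes = 2, so the function writes the 2-bit value 2 followed by 16 bits holding 299. The added input_size > (1 << 24) branch rejects sizes whose length would need more than the three bytes the 2-bit nbytes field can declare.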
 
@@ -51,7 +51,7 @@ struct BrotliParams {
 // Compression mode for UTF-8 format text input.
 MODE_TEXT = 1,
 // Compression mode used in WOFF 2.0.
-MODE_FONT = 2,
+MODE_FONT = 2
 };
 Mode mode;
 
@@ -154,7 +154,7 @@ class BrotliCompressor {
 RingBuffer* ringbuffer_;
 size_t cmd_alloc_size_;
 Command* commands_;
-int num_commands_;
+size_t num_commands_;
 int num_literals_;
 int last_insert_len_;
 size_t last_flush_pos_;
@@ -165,7 +165,7 @@ class BrotliCompressor {
 uint8_t last_byte_bits_;
 uint8_t prev_byte_;
 uint8_t prev_byte2_;
-int storage_size_;
+size_t storage_size_;
 uint8_t* storage_;
 };
 

@@ -39,14 +39,14 @@ namespace brotli {
 
 namespace {
 
-void RecomputeDistancePrefixes(Command* cmds, int num_commands,
+void RecomputeDistancePrefixes(Command* cmds, size_t num_commands,
 int num_direct_distance_codes,
 int distance_postfix_bits) {
 if (num_direct_distance_codes == 0 &&
 distance_postfix_bits == 0) {
 return;
 }
-for (int i = 0; i < num_commands; ++i) {
+for (size_t i = 0; i < num_commands; ++i) {
 Command* cmd = &cmds[i];
 if (cmd->copy_len_ > 0 && cmd->cmd_prefix_ >= 128) {
 PrefixEncodeCopyDistance(cmd->DistanceCode(),
@@ -100,7 +100,7 @@ bool WriteMetaBlockParallel(const BrotliParams& params,
 
 // Compute backward references.
 int last_insert_len = 0;
-int num_commands = 0;
+size_t num_commands = 0;
 int num_literals = 0;
 int max_backward_distance = (1 << params.lgwin) - 16;
 int dist_cache[4] = { -4, -4, -4, -4 };
@@ -167,7 +167,7 @@ bool WriteMetaBlockParallel(const BrotliParams& params,
 first_byte_bits = 4;
 }
 }
-storage[0] = first_byte;
+storage[0] = static_cast<uint8_t>(first_byte);
 int storage_ix = first_byte_bits;
 
 // Store the meta-block to the temporary output.
@@ -195,7 +195,7 @@ bool WriteMetaBlockParallel(const BrotliParams& params,
 // meta-block.
 size_t output_size = storage_ix >> 3;
 if (input_size + 4 < output_size) {
-storage[0] = first_byte;
+storage[0] = static_cast<uint8_t>(first_byte);
 storage_ix = first_byte_bits;
 if (!StoreUncompressedMetaBlock(is_last, &input[0], input_pos, mask,
 input_size,
@@ -253,7 +253,7 @@ int BrotliCompressBufferParallel(BrotliParams params,
 // Compress block-by-block independently.
 for (size_t pos = 0; pos < input_size; ) {
 size_t input_block_size = std::min(max_input_block_size, input_size - pos);
-size_t out_size = 1.2 * input_block_size + 1024;
+size_t out_size = input_block_size + (input_block_size >> 3) + 1024;
 std::vector<uint8_t> out(out_size);
 if (!WriteMetaBlockParallel(params,
 input_block_size,
@@ -273,7 +273,7 @@ int BrotliCompressBufferParallel(BrotliParams params,
 
 // Piece together the output.
 size_t out_pos = 0;
-for (int i = 0; i < compressed_pieces.size(); ++i) {
+for (size_t i = 0; i < compressed_pieces.size(); ++i) {
 const std::vector<uint8_t>& out = compressed_pieces[i];
 if (out_pos + out.size() > *encoded_size) {
 return false;

@@ -47,7 +47,7 @@ bool SortHuffmanTree(const HuffmanTree &v0, const HuffmanTree &v1) {
 void SetDepth(const HuffmanTree &p,
 HuffmanTree *pool,
 uint8_t *depth,
-int level) {
+uint8_t level) {
 if (p.index_left_ >= 0) {
 ++level;
 SetDepth(pool[p.index_left_], pool, depth, level);
@@ -89,11 +89,11 @@ void CreateHuffmanTree(const int *data,
 for (int i = length - 1; i >= 0; --i) {
 if (data[i]) {
 const int count = std::max(data[i], count_limit);
-tree.push_back(HuffmanTree(count, -1, i));
+tree.push_back(HuffmanTree(count, -1, static_cast<int16_t>(i)));
 }
 }
 
-const int n = tree.size();
+const int n = static_cast<int>(tree.size());
 if (n == 1) {
 depth[tree[0].index_right_or_value_] = 1; // Only one element.
 break;
@@ -132,11 +132,11 @@ void CreateHuffmanTree(const int *data,
 }
 
 // The sentinel node becomes the parent node.
-int j_end = tree.size() - 1;
+int j_end = static_cast<int>(tree.size()) - 1;
 tree[j_end].total_count_ =
 tree[left].total_count_ + tree[right].total_count_;
-tree[j_end].index_left_ = left;
-tree[j_end].index_right_or_value_ = right;
+tree[j_end].index_left_ = static_cast<int16_t>(left);
+tree[j_end].index_right_or_value_ = static_cast<int16_t>(right);
 
 // Add back the last sentinel node.
 tree.push_back(sentinel);
@@ -155,7 +155,7 @@ void CreateHuffmanTree(const int *data,
 void Reverse(std::vector<uint8_t>* v, int start, int end) {
 --end;
 while (start < end) {
-int tmp = (*v)[start];
+uint8_t tmp = (*v)[start];
 (*v)[start] = (*v)[end];
 (*v)[end] = tmp;
 ++start;
@@ -164,8 +164,8 @@ void Reverse(std::vector<uint8_t>* v, int start, int end) {
 }
 
 void WriteHuffmanTreeRepetitions(
-const int previous_value,
-const int value,
+const uint8_t previous_value,
+const uint8_t value,
 int repetitions,
 std::vector<uint8_t> *tree,
 std::vector<uint8_t> *extra_bits_data) {
@@ -186,15 +186,15 @@ void WriteHuffmanTreeRepetitions(
 }
 } else {
 repetitions -= 3;
-int start = tree->size();
+int start = static_cast<int>(tree->size());
 while (repetitions >= 0) {
 tree->push_back(16);
 extra_bits_data->push_back(repetitions & 0x3);
 repetitions >>= 2;
 --repetitions;
 }
-Reverse(tree, start, tree->size());
-Reverse(extra_bits_data, start, tree->size());
+Reverse(tree, start, static_cast<int>(tree->size()));
+Reverse(extra_bits_data, start, static_cast<int>(tree->size()));
 }
 }
 
@@ -214,15 +214,15 @@ void WriteHuffmanTreeRepetitionsZeros(
 }
 } else {
 repetitions -= 3;
-int start = tree->size();
+int start = static_cast<int>(tree->size());
 while (repetitions >= 0) {
 tree->push_back(17);
 extra_bits_data->push_back(repetitions & 0x7);
 repetitions >>= 3;
 --repetitions;
 }
-Reverse(tree, start, tree->size());
-Reverse(extra_bits_data, start, tree->size());
+Reverse(tree, start, static_cast<int>(tree->size()));
+Reverse(extra_bits_data, start, static_cast<int>(tree->size()));
 }
 }
 
@@ -371,10 +371,10 @@ static void DecideOverRleUse(const uint8_t* depth, const int length,
 int total_reps_non_zero = 0;
 int count_reps_zero = 0;
 int count_reps_non_zero = 0;
-for (uint32_t i = 0; i < length;) {
+for (int i = 0; i < length;) {
 const int value = depth[i];
 int reps = 1;
-for (uint32_t k = i + 1; k < length && depth[k] == value; ++k) {
+for (int k = i + 1; k < length && depth[k] == value; ++k) {
 ++reps;
 }
 if (reps >= 3 && value == 0) {
@@ -397,11 +397,11 @@ void WriteHuffmanTree(const uint8_t* depth,
 uint32_t length,
 std::vector<uint8_t> *tree,
 std::vector<uint8_t> *extra_bits_data) {
-int previous_value = 8;
+uint8_t previous_value = 8;
 
 // Throw away trailing zeros.
-int new_length = length;
-for (int i = 0; i < length; ++i) {
+uint32_t new_length = length;
+for (uint32_t i = 0; i < length; ++i) {
 if (depth[length - i - 1] == 0) {
 --new_length;
 } else {
@@ -421,7 +421,7 @@ void WriteHuffmanTree(const uint8_t* depth,
 
 // Actual rle coding.
 for (uint32_t i = 0; i < new_length;) {
-const int value = depth[i];
+const uint8_t value = depth[i];
 int reps = 1;
 if ((value != 0 && use_rle_for_non_zero) ||
 (value == 0 && use_rle_for_zero)) {
@@ -450,11 +450,11 @@ uint16_t ReverseBits(int num_bits, uint16_t bits) {
 size_t retval = kLut[bits & 0xf];
 for (int i = 4; i < num_bits; i += 4) {
 retval <<= 4;
-bits >>= 4;
+bits = static_cast<uint16_t>(bits >> 4);
 retval |= kLut[bits & 0xf];
 }
 retval >>= (-num_bits & 0x3);
-return retval;
+return static_cast<uint16_t>(retval);
 }
 
 } // namespace
@@ -476,7 +476,7 @@ void ConvertBitDepthsToSymbols(const uint8_t *depth, int len, uint16_t *bits) {
 int code = 0;
 for (int bits = 1; bits < kMaxBits; ++bits) {
 code = (code + bl_count[bits - 1]) << 1;
-next_code[bits] = code;
+next_code[bits] = static_cast<uint16_t>(code);
 }
 }
 for (int i = 0; i < len; ++i) {

enc/hash.h (36 changes)
@@ -155,14 +155,14 @@ class HashLongestMatchQuickly {
 const size_t ring_buffer_mask,
 const int* __restrict distance_cache,
 const uint32_t cur_ix,
-const uint32_t max_length,
+const int max_length,
 const uint32_t max_backward,
 int * __restrict best_len_out,
 int * __restrict best_len_code_out,
 int * __restrict best_distance_out,
 double* __restrict best_score_out) {
 const int best_len_in = *best_len_out;
-const int cur_ix_masked = cur_ix & ring_buffer_mask;
+const size_t cur_ix_masked = cur_ix & ring_buffer_mask;
 int compare_char = ring_buffer[cur_ix_masked + best_len_in];
 double best_score = *best_score_out;
 int best_len = best_len_in;
@@ -170,7 +170,7 @@ class HashLongestMatchQuickly {
 uint32_t prev_ix = cur_ix - cached_backward;
 bool match_found = false;
 if (prev_ix < cur_ix) {
-prev_ix &= ring_buffer_mask;
+prev_ix &= static_cast<uint32_t>(ring_buffer_mask);
 if (compare_char == ring_buffer[prev_ix + best_len]) {
 int len = FindMatchLengthWithLimit(&ring_buffer[prev_ix],
 &ring_buffer[cur_ix_masked],
@@ -196,7 +196,7 @@ class HashLongestMatchQuickly {
 // Only one to look for, don't bother to prepare for a loop.
 prev_ix = buckets_[key];
 uint32_t backward = cur_ix - prev_ix;
-prev_ix &= ring_buffer_mask;
+prev_ix &= static_cast<uint32_t>(ring_buffer_mask);
 if (compare_char != ring_buffer[prev_ix + best_len_in]) {
 return false;
 }
@@ -218,7 +218,7 @@ class HashLongestMatchQuickly {
 prev_ix = *bucket++;
 for (int i = 0; i < kBucketSweep; ++i, prev_ix = *bucket++) {
 const uint32_t backward = cur_ix - prev_ix;
-prev_ix &= ring_buffer_mask;
+prev_ix &= static_cast<uint32_t>(ring_buffer_mask);
 if (compare_char != ring_buffer[prev_ix + best_len]) {
 continue;
 }
@@ -262,7 +262,7 @@ class HashLongestMatchQuickly {
 const int word_id =
 transform_id * (1 << kBrotliDictionarySizeBitsByLength[len]) +
 dist;
-const size_t backward = max_backward + word_id + 1;
+const int backward = max_backward + word_id + 1;
 const double score = BackwardReferenceScore(matchlen, backward);
 if (best_score < score) {
 ++num_dict_matches_;
@@ -292,7 +292,7 @@ class HashLongestMatchQuickly {
 uint64_t h = (BROTLI_UNALIGNED_LOAD64(data) << 24) * kHashMul32;
 // The higher bits contain more mixture from the multiplication,
 // so we take our results from there.
-return h >> (64 - kBucketBits);
+return static_cast<uint32_t>(h >> (64 - kBucketBits));
 }
 
 private:
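The comment above is the standard multiplicative-hashing argument: the low bits of a product depend only on the low bits of the input, while the high bits mix contributions from every input bit, so the bucket index is taken from the top of the 64-bit product; the new static_cast only makes the 64-to-32-bit truncation explicit. The same shape in isolation (an illustration with an assumed multiplier, not the class's actual constants):

#include <cstdint>

// Take bucket_bits from the top of the product. kMul here is an arbitrary
// odd 64-bit constant chosen for illustration.
inline uint32_t MultiplicativeBucket(uint64_t key, int bucket_bits) {
  const uint64_t kMul = 0x9e3779b97f4a7c15ull;  // assumed, not brotli's
  return static_cast<uint32_t>((key * kMul) >> (64 - bucket_bits));
}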
|
||||
@ -348,7 +348,7 @@ class HashLongestMatch {
|
||||
const size_t ring_buffer_mask,
|
||||
const int* __restrict distance_cache,
|
||||
const uint32_t cur_ix,
|
||||
uint32_t max_length,
|
||||
const int max_length,
|
||||
const uint32_t max_backward,
|
||||
int * __restrict best_len_out,
|
||||
int * __restrict best_len_code_out,
|
||||
@ -372,14 +372,14 @@ class HashLongestMatch {
|
||||
if (PREDICT_FALSE(backward > max_backward)) {
|
||||
continue;
|
||||
}
|
||||
prev_ix &= ring_buffer_mask;
|
||||
prev_ix &= static_cast<uint32_t>(ring_buffer_mask);
|
||||
|
||||
if (cur_ix_masked + best_len > ring_buffer_mask ||
|
||||
prev_ix + best_len > ring_buffer_mask ||
|
||||
data[cur_ix_masked + best_len] != data[prev_ix + best_len]) {
|
||||
continue;
|
||||
}
|
||||
const size_t len =
|
||||
const int len =
|
||||
FindMatchLengthWithLimit(&data[prev_ix], &data[cur_ix_masked],
|
||||
max_length);
|
||||
if (len >= 3 || (len == 2 && i < 2)) {
|
||||
@ -407,13 +407,13 @@ class HashLongestMatch {
|
||||
if (PREDICT_FALSE(backward == 0 || backward > max_backward)) {
|
||||
break;
|
||||
}
|
||||
prev_ix &= ring_buffer_mask;
|
||||
prev_ix &= static_cast<uint32_t>(ring_buffer_mask);
|
||||
if (cur_ix_masked + best_len > ring_buffer_mask ||
|
||||
prev_ix + best_len > ring_buffer_mask ||
|
||||
data[cur_ix_masked + best_len] != data[prev_ix + best_len]) {
|
||||
continue;
|
||||
}
|
||||
const size_t len =
|
||||
const int len =
|
||||
FindMatchLengthWithLimit(&data[prev_ix], &data[cur_ix_masked],
|
||||
max_length);
|
||||
if (len >= 4) {
|
||||
@ -450,7 +450,7 @@ class HashLongestMatch {
const int word_id =
transform_id * (1 << kBrotliDictionarySizeBitsByLength[len]) +
dist;
const size_t backward = max_backward + word_id + 1;
const int backward = max_backward + word_id + 1;
double score = BackwardReferenceScore(matchlen, backward);
if (best_score < score) {
++num_dict_matches_;
@ -482,7 +482,7 @@ class HashLongestMatch {
void FindAllMatches(const uint8_t* data,
const size_t ring_buffer_mask,
const uint32_t cur_ix,
uint32_t max_length,
const int max_length,
const uint32_t max_backward,
int* num_matches,
BackwardMatch* matches) const {
@ -502,7 +502,7 @@ class HashLongestMatch {
data[cur_ix_masked + 1] != data[prev_ix + 1]) {
continue;
}
const size_t len =
const int len =
FindMatchLengthWithLimit(&data[prev_ix], &data[cur_ix_masked],
max_length);
if (len > best_len) {
@ -522,13 +522,13 @@ class HashLongestMatch {
if (PREDICT_FALSE(backward == 0 || backward > max_backward)) {
break;
}
prev_ix &= ring_buffer_mask;
prev_ix &= static_cast<uint32_t>(ring_buffer_mask);
if (cur_ix_masked + best_len > ring_buffer_mask ||
prev_ix + best_len > ring_buffer_mask ||
data[cur_ix_masked + best_len] != data[prev_ix + best_len]) {
continue;
}
const size_t len =
const int len =
FindMatchLengthWithLimit(&data[prev_ix], &data[cur_ix_masked],
max_length);
if (len > best_len) {
@ -552,7 +552,7 @@ class HashLongestMatch {
}
}
}
*num_matches += matches - orig_matches;
*num_matches += static_cast<int>(matches - orig_matches);
}
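The num_matches fix is the pointer-arithmetic case of the same warning: subtracting two pointers yields std::ptrdiff_t, so accumulating into an int counter needs a cast. In isolation (struct fields are a sketch):

#include <cstddef>

struct BackwardMatch { int distance; int length_and_code; };  // sketch fields

void AccumulateMatches(const BackwardMatch* matches,
                       const BackwardMatch* orig_matches, int* num_matches) {
  // ptrdiff_t -> int would warn under -Wconversion without the cast.
  *num_matches += static_cast<int>(matches - orig_matches);
}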
enum { kHashLength = 4 };

@ -44,7 +44,7 @@ void BuildHistograms(
BlockSplitIterator literal_it(literal_split);
BlockSplitIterator insert_and_copy_it(insert_and_copy_split);
BlockSplitIterator dist_it(dist_split);
for (int i = 0; i < num_commands; ++i) {
for (size_t i = 0; i < num_commands; ++i) {
const Command &cmd = cmds[i];
insert_and_copy_it.Next();
(*insert_and_copy_histograms)[insert_and_copy_it.type_].Add(
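This is the commit's most common fix: loop indices over size_t bounds become size_t themselves, so the comparison stays unsigned against unsigned. A minimal before/after:

#include <cstddef>

void VisitCommands(const int* cmds, size_t num_commands) {
  // for (int i = 0; i < num_commands; ++i)  // signed/unsigned comparison
  for (size_t i = 0; i < num_commands; ++i) {
    (void)cmds[i];  // process command i
  }
}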
@ -51,7 +51,7 @@ struct Histogram {
}
template<typename DataType>
void Add(const DataType *p, size_t n) {
total_count_ += n;
total_count_ += static_cast<int>(n);
n += 1;
while(--n) ++data_[*p++];
}
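Histogram::Add's `n += 1; while (--n)` idiom simply runs the body exactly n times. An equivalent, more explicit sketch of the same accumulation with the same cast:

#include <cstddef>

template <typename DataType>
void Add(int* data, int* total_count, const DataType* p, size_t n) {
  *total_count += static_cast<int>(n);  // counters stay int in the histogram
  for (size_t i = 0; i < n; ++i) {
    ++data[p[i]];  // one count per symbol
  }
}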
@ -46,7 +46,7 @@ static int DecideMultiByteStatsLevel(size_t pos, size_t len, size_t mask,
int max_utf8 = 1; // should be 2, but 1 compresses better.
int last_c = 0;
int utf8_pos = 0;
for (int i = 0; i < len; ++i) {
for (size_t i = 0; i < len; ++i) {
int c = data[(pos + i) & mask];
utf8_pos = UTF8Position(last_c, c, 2);
++counts[utf8_pos];
@ -69,7 +69,7 @@ void EstimateBitCostsForLiteralsUTF8(size_t pos, size_t len, size_t mask,
const int max_utf8 = DecideMultiByteStatsLevel(pos, len, mask, data);
int histogram[3][256] = { { 0 } };
int window_half = 495;
int in_window = std::min(static_cast<size_t>(window_half), len);
int in_window = std::min(window_half, static_cast<int>(len));
int in_window_utf8[3] = { 0 };

// Bootstrap histograms.
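The std::min rewrite matters because std::min deduces one type from both arguments: casting the int window up to size_t and assigning the result back to int narrows silently, while casting len down keeps the whole expression in int. In isolation:

#include <algorithm>
#include <cstddef>

int InitialWindow(int window_half, size_t len) {
  // return std::min(static_cast<size_t>(window_half), len);  // size_t -> int
  return std::min(window_half, static_cast<int>(len));  // int throughout
}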
@ -84,7 +84,7 @@ void EstimateBitCostsForLiteralsUTF8(size_t pos, size_t len, size_t mask,
}

// Compute bit costs with sliding window.
for (int i = 0; i < len; ++i) {
for (int i = 0; i < static_cast<int>(len); ++i) {
if (i - window_half >= 0) {
// Remove a byte in the past.
int c = (i - window_half - 1) < 0 ?
@ -95,7 +95,7 @@ void EstimateBitCostsForLiteralsUTF8(size_t pos, size_t len, size_t mask,
--histogram[utf8_pos2][data[(pos + i - window_half) & mask]];
--in_window_utf8[utf8_pos2];
}
if (i + window_half < len) {
if (i + window_half < static_cast<int>(len)) {
// Add a byte in the future.
int c = data[(pos + i + window_half - 1) & mask];
int last_c = data[(pos + i + window_half - 2) & mask];
@ -106,12 +106,12 @@ void EstimateBitCostsForLiteralsUTF8(size_t pos, size_t len, size_t mask,
int c = i < 1 ? 0 : data[(pos + i - 1) & mask];
int last_c = i < 2 ? 0 : data[(pos + i - 2) & mask];
int utf8_pos = UTF8Position(last_c, c, max_utf8);
int masked_pos = (pos + i) & mask;
size_t masked_pos = (pos + i) & mask;
int histo = histogram[utf8_pos][data[masked_pos]];
if (histo == 0) {
histo = 1;
}
float lit_cost = FastLog2(in_window_utf8[utf8_pos]) - FastLog2(histo);
double lit_cost = FastLog2(in_window_utf8[utf8_pos]) - FastLog2(histo);
lit_cost += 0.02905;
if (lit_cost < 1.0) {
lit_cost *= 0.5;
@ -124,7 +124,7 @@ void EstimateBitCostsForLiteralsUTF8(size_t pos, size_t len, size_t mask,
if (i < 2000) {
lit_cost += 0.7 - ((2000 - i) / 2000.0 * 0.35);
}
cost[i] = lit_cost;
cost[i] = static_cast<float>(lit_cost);
}
}
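The cost formula in this loop is an empirical entropy estimate: a byte seen histo times in a window of in_window literals costs about -log2(histo/in_window) bits. In isolation (std::log2 standing in for brotli's FastLog2 approximation):

#include <cmath>

double LiteralBitCost(int histo, int in_window) {
  if (histo == 0) histo = 1;  // unseen bytes get a floor, not infinite cost
  return std::log2(static_cast<double>(in_window)) -
         std::log2(static_cast<double>(histo));
}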
@ -136,7 +136,7 @@ void EstimateBitCostsForLiterals(size_t pos, size_t len, size_t mask,
}
int histogram[256] = { 0 };
int window_half = 2000;
int in_window = std::min(static_cast<size_t>(window_half), len);
int in_window = std::min(window_half, static_cast<int>(len));

// Bootstrap histogram.
for (int i = 0; i < in_window; ++i) {
@ -144,13 +144,13 @@ void EstimateBitCostsForLiterals(size_t pos, size_t len, size_t mask,
}

// Compute bit costs with sliding window.
for (int i = 0; i < len; ++i) {
for (int i = 0; i < static_cast<int>(len); ++i) {
if (i - window_half >= 0) {
// Remove a byte in the past.
--histogram[data[(pos + i - window_half) & mask]];
--in_window;
}
if (i + window_half < len) {
if (i + window_half < static_cast<int>(len)) {
// Add a byte in the future.
++histogram[data[(pos + i + window_half) & mask]];
++in_window;
@ -159,13 +159,13 @@ void EstimateBitCostsForLiterals(size_t pos, size_t len, size_t mask,
if (histo == 0) {
histo = 1;
}
float lit_cost = FastLog2(in_window) - FastLog2(histo);
double lit_cost = FastLog2(in_window) - FastLog2(histo);
lit_cost += 0.029;
if (lit_cost < 1.0) {
lit_cost *= 0.5;
lit_cost += 0.5;
}
cost[i] = lit_cost;
cost[i] = static_cast<float>(lit_cost);
}
}
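Both estimators share one scheme: bootstrap a histogram over the first window, then slide it by removing the byte falling out of the window and adding the byte entering it, so per-byte cost tracks local statistics. A simplified standalone version (no ring-buffer mask, no UTF-8 context split; std::log2 again stands in for FastLog2):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

void EstimateCosts(const uint8_t* data, int len, std::vector<float>* cost) {
  const int window_half = 2000;
  int histogram[256] = { 0 };
  int in_window = std::min(window_half, len);
  for (int i = 0; i < in_window; ++i) ++histogram[data[i]];  // bootstrap
  cost->resize(static_cast<size_t>(len));
  for (int i = 0; i < len; ++i) {
    if (i - window_half >= 0) {   // remove a byte in the past
      --histogram[data[i - window_half]];
      --in_window;
    }
    if (i + window_half < len) {  // add a byte in the future
      ++histogram[data[i + window_half]];
      ++in_window;
    }
    const int histo = std::max(histogram[data[i]], 1);
    const double lit_cost = std::log2(static_cast<double>(in_window)) -
                            std::log2(static_cast<double>(histo));
    (*cost)[i] = static_cast<float>(lit_cost);  // one deliberate narrowing
  }
}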
|
@ -247,7 +247,7 @@ void BuildMetaBlockGreedy(const uint8_t* ringbuffer,
|
||||
size_t n_commands,
|
||||
MetaBlockSplit* mb) {
|
||||
int num_literals = 0;
|
||||
for (int i = 0; i < n_commands; ++i) {
|
||||
for (size_t i = 0; i < n_commands; ++i) {
|
||||
num_literals += commands[i].insert_len_;
|
||||
}
|
||||
|
||||
@ -255,13 +255,13 @@ void BuildMetaBlockGreedy(const uint8_t* ringbuffer,
|
||||
256, 512, 400.0, num_literals,
|
||||
&mb->literal_split, &mb->literal_histograms);
|
||||
BlockSplitter<HistogramCommand> cmd_blocks(
|
||||
kNumCommandPrefixes, 1024, 500.0, n_commands,
|
||||
kNumCommandPrefixes, 1024, 500.0, static_cast<int>(n_commands),
|
||||
&mb->command_split, &mb->command_histograms);
|
||||
BlockSplitter<HistogramDistance> dist_blocks(
|
||||
64, 512, 100.0, n_commands,
|
||||
64, 512, 100.0, static_cast<int>(n_commands),
|
||||
&mb->distance_split, &mb->distance_histograms);
|
||||
|
||||
for (int i = 0; i < n_commands; ++i) {
|
||||
for (size_t i = 0; i < n_commands; ++i) {
|
||||
const Command cmd = commands[i];
|
||||
cmd_blocks.AddSymbol(cmd.cmd_prefix_);
|
||||
for (int j = 0; j < cmd.insert_len_; ++j) {
|
||||
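For orientation, the feed pattern in both BuildMetaBlockGreedy variants: each command contributes one command symbol, insert_len_ literal symbols, and (when present) one distance symbol to its splitter. A hedged sketch with stand-in types, not brotli's real BlockSplitter:

#include <cstddef>
#include <cstdint>

struct Splitter { void AddSymbol(int /*symbol*/) {} };  // stand-in

struct Cmd {
  int cmd_prefix_;
  int insert_len_;
  int dist_prefix_;
  bool has_distance() const { return dist_prefix_ >= 0; }  // assumed rule
};

void FeedSplitters(const Cmd* commands, size_t n_commands,
                   const uint8_t* literals, Splitter* lit_blocks,
                   Splitter* cmd_blocks, Splitter* dist_blocks) {
  size_t pos = 0;
  for (size_t i = 0; i < n_commands; ++i) {
    const Cmd cmd = commands[i];
    cmd_blocks->AddSymbol(cmd.cmd_prefix_);
    for (int j = 0; j < cmd.insert_len_; ++j) {
      lit_blocks->AddSymbol(literals[pos++]);
    }
    if (cmd.has_distance()) dist_blocks->AddSymbol(cmd.dist_prefix_);
  }
}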
@ -473,7 +473,7 @@ void BuildMetaBlockGreedyWithContexts(const uint8_t* ringbuffer,
size_t n_commands,
MetaBlockSplit* mb) {
int num_literals = 0;
for (int i = 0; i < n_commands; ++i) {
for (size_t i = 0; i < n_commands; ++i) {
num_literals += commands[i].insert_len_;
}

@ -481,13 +481,13 @@ void BuildMetaBlockGreedyWithContexts(const uint8_t* ringbuffer,
256, num_contexts, 512, 400.0, num_literals,
&mb->literal_split, &mb->literal_histograms);
BlockSplitter<HistogramCommand> cmd_blocks(
kNumCommandPrefixes, 1024, 500.0, n_commands,
kNumCommandPrefixes, 1024, 500.0, static_cast<int>(n_commands),
&mb->command_split, &mb->command_histograms);
BlockSplitter<HistogramDistance> dist_blocks(
64, 512, 100.0, n_commands,
64, 512, 100.0, static_cast<int>(n_commands),
&mb->distance_split, &mb->distance_histograms);

for (int i = 0; i < n_commands; ++i) {
for (size_t i = 0; i < n_commands; ++i) {
const Command cmd = commands[i];
cmd_blocks.AddSymbol(cmd.cmd_prefix_);
for (int j = 0; j < cmd.insert_len_; ++j) {
@ -525,17 +525,17 @@ void BuildMetaBlockGreedyWithContexts(const uint8_t* ringbuffer,
void OptimizeHistograms(int num_direct_distance_codes,
int distance_postfix_bits,
MetaBlockSplit* mb) {
for (int i = 0; i < mb->literal_histograms.size(); ++i) {
for (size_t i = 0; i < mb->literal_histograms.size(); ++i) {
OptimizeHuffmanCountsForRle(256, &mb->literal_histograms[i].data_[0]);
}
for (int i = 0; i < mb->command_histograms.size(); ++i) {
for (size_t i = 0; i < mb->command_histograms.size(); ++i) {
OptimizeHuffmanCountsForRle(kNumCommandPrefixes,
&mb->command_histograms[i].data_[0]);
}
int num_distance_codes =
kNumDistanceShortCodes + num_direct_distance_codes +
(48 << distance_postfix_bits);
for (int i = 0; i < mb->distance_histograms.size(); ++i) {
for (size_t i = 0; i < mb->distance_histograms.size(); ++i) {
OptimizeHuffmanCountsForRle(num_distance_codes,
&mb->distance_histograms[i].data_[0]);
}
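The distance alphabet size computed above follows the brotli format: 16 short codes, the direct codes, and 48 bucketed codes replicated once per postfix value. As a standalone helper:

const int kNumDistanceShortCodes = 16;

int NumDistanceCodes(int num_direct_distance_codes, int distance_postfix_bits) {
  return kNumDistanceShortCodes + num_direct_distance_codes +
         (48 << distance_postfix_bits);
}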
@ -63,7 +63,7 @@ inline void PrefixEncodeCopyDistance(int distance_code,
uint16_t* code,
uint32_t* extra_bits) {
if (distance_code < kNumDistanceShortCodes + num_direct_codes) {
*code = distance_code;
*code = static_cast<uint16_t>(distance_code);
*extra_bits = 0;
return;
}
@ -75,8 +75,9 @@ inline void PrefixEncodeCopyDistance(int distance_code,
int prefix = (distance_code >> bucket) & 1;
int offset = (2 + prefix) << bucket;
int nbits = bucket - postfix_bits;
*code = kNumDistanceShortCodes + num_direct_codes +
((2 * (nbits - 1) + prefix) << postfix_bits) + postfix;
*code = static_cast<uint16_t>(
(kNumDistanceShortCodes + num_direct_codes +
((2 * (nbits - 1) + prefix) << postfix_bits) + postfix));
*extra_bits = (nbits << 24) | ((distance_code - offset) >> postfix_bits);
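PrefixEncodeCopyDistance packs its result as seen above and as unpacked in the zopfli cost model (distextra >> 24): the top 8 bits carry the extra-bit count, the low 24 bits carry their value. In isolation:

#include <cstdint>

uint32_t PackExtraBits(int nbits, uint32_t value) {
  return (static_cast<uint32_t>(nbits) << 24) | value;  // count | value
}

int NumExtraBits(uint32_t packed) { return static_cast<int>(packed >> 24); }
uint32_t ExtraBitsValue(uint32_t packed) { return packed & 0xffffffu; }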
@ -36,7 +36,7 @@ class RingBuffer {
tail_size_(1 << tail_bits),
pos_(0) {
static const int kSlackForEightByteHashingEverywhere = 7;
const int buflen = (1 << window_bits_) + tail_size_;
const size_t buflen = (1 << window_bits_) + tail_size_;
buffer_ = new uint8_t[buflen + kSlackForEightByteHashingEverywhere];
for (int i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {
buffer_[buflen + i] = 0;
@ -52,7 +52,7 @@ class RingBuffer {
// The length of the writes is limited so that we do not need to worry
// about a write
WriteTail(bytes, n);
if (PREDICT_TRUE(masked_pos + n <= (1 << window_bits_))) {
if (PREDICT_TRUE(masked_pos + n <= (1U << window_bits_))) {
// A single write fits.
memcpy(&buffer_[masked_pos], bytes, n);
} else {
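The 1 -> 1U change in the write path, in isolation: masked_pos + n is size_t, and comparing it against a signed 1 << window_bits mixes signedness; an unsigned literal keeps the whole comparison unsigned:

#include <cstddef>

bool SingleWriteFits(size_t masked_pos, size_t n, int window_bits) {
  // masked_pos + n <= (1 << window_bits)      // signed/unsigned comparison
  return masked_pos + n <= (1u << window_bits);  // unsigned throughout
}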
@ -44,7 +44,7 @@ enum WordTransformType {
kOmitFirst6 = 17,
kOmitFirst7 = 18,
kOmitFirst8 = 19,
kOmitFirst9 = 20,
kOmitFirst9 = 20
};

struct Transform {
@ -177,7 +177,8 @@ static const Transform kTransforms[] = {
{ " ", kUppercaseFirst, "='" },
};

static const int kNumTransforms = sizeof(kTransforms) / sizeof(kTransforms[0]);
static const size_t kNumTransforms =
sizeof(kTransforms) / sizeof(kTransforms[0]);

static const int kOmitFirstNTransforms[10] = {
0, 3, 11, 26, 34, 39, 40, 55, 0, 54
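The kOmitFirst9 hunk only drops a trailing comma: C++98 with -pedantic-errors rejects a comma after the last enumerator (C++11 permits it). Schematically, with a hypothetical enum of the same shape:

enum ExampleTransformType {
  kExampleOmitFirst8 = 19,
  kExampleOmitFirst9 = 20   // no trailing comma under -pedantic-errors
};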
@ -22,7 +22,7 @@ namespace brotli {

namespace {

int ParseAsUTF8(int* symbol, const uint8_t* input, int size) {
int ParseAsUTF8(int* symbol, const uint8_t* input, size_t size) {
// ASCII
if ((input[0] & 0x80) == 0) {
*symbol = input[0];
@ -31,7 +31,7 @@ int ParseAsUTF8(int* symbol, const uint8_t* input, int size) {
}
}
// 2-byte UTF8
if (size > 1 &&
if (size > 1u &&
(input[0] & 0xe0) == 0xc0 &&
(input[1] & 0xc0) == 0x80) {
*symbol = (((input[0] & 0x1f) << 6) |
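The 2-byte branch shown above, completed as a standalone sketch: a 110xxxxx lead byte plus a 10xxxxxx continuation byte yields an 11-bit code point, and with size now a size_t the bound check compares unsigned against unsigned:

#include <cstddef>
#include <cstdint>

// Returns bytes consumed, or 0 if the input does not start a valid
// 2-byte sequence.
int ParseTwoByteUTF8(int* symbol, const uint8_t* input, size_t size) {
  if (size > 1u &&
      (input[0] & 0xe0) == 0xc0 &&
      (input[1] & 0xc0) == 0x80) {
    *symbol = ((input[0] & 0x1f) << 6) | (input[1] & 0x3f);
    return 2;
  }
  return 0;
}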
@ -41,7 +41,7 @@ int ParseAsUTF8(int* symbol, const uint8_t* input, int size) {
}
}
// 3-byte UFT8
if (size > 2 &&
if (size > 2u &&
(input[0] & 0xf0) == 0xe0 &&
(input[1] & 0xc0) == 0x80 &&
(input[2] & 0xc0) == 0x80) {
@ -53,7 +53,7 @@ int ParseAsUTF8(int* symbol, const uint8_t* input, int size) {
}
}
// 4-byte UFT8
if (size > 3 &&
if (size > 3u &&
(input[0] & 0xf8) == 0xf0 &&
(input[1] & 0xc0) == 0x80 &&
(input[2] & 0xc0) == 0x80 &&
@ -84,7 +84,7 @@ bool IsMostlyUTF8(const uint8_t* data, const size_t pos, const size_t mask,
i += bytes_read;
if (symbol < 0x110000) size_utf8 += bytes_read;
}
return size_utf8 > min_fraction * length;
return size_utf8 > min_fraction * static_cast<double>(length);
}

} // namespace brotli
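The final utf8_util hunk spells out the integer-to-double conversion on the multiply. The accept test in isolation: input qualifies as mostly UTF-8 when the validly parsed bytes exceed the requested fraction of all bytes:

#include <cstddef>

bool MostlyUTF8(size_t size_utf8, size_t length, double min_fraction) {
  return static_cast<double>(size_utf8) >
         min_fraction * static_cast<double>(length);
}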
@ -49,7 +49,7 @@ inline void WriteBits(int n_bits,
#ifdef BIT_WRITER_DEBUG
printf("WriteBits %2d 0x%016llx %10d\n", n_bits, bits, *pos);
#endif
assert(bits < 1ULL << n_bits);
assert(bits < 1UL << n_bits);
#ifdef IS_LITTLE_ENDIAN
// This branch of the code can write up to 56 bits at a time,
// 7 bits are lost by being perhaps already in *p and at least
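For context, the little-endian fast path the trailing comment describes: OR up to 56 new bits into a 64-bit word loaded at the current byte, since 0-7 bits of that byte may already be occupied. A hedged sketch (memcpy stands in for the unaligned load/store; the destination needs 8 writable bytes of slack at the current position):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

void WriteBitsLE(int n_bits, uint64_t bits, size_t* pos, uint8_t* array) {
  assert(n_bits <= 56);
  assert(bits < (1ULL << n_bits));
  uint8_t* p = &array[*pos >> 3];   // byte holding the current bit position
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));    // assumes a little-endian host
  v |= bits << (*pos & 7);          // skip the bits already in this byte
  std::memcpy(p, &v, sizeof(v));
  *pos += static_cast<size_t>(n_bits);
}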