[wasm] Add some DCHECKs to the native module cache

And fix a few issues that were already found.

R=clemensb@chromium.org

Change-Id: Ib93626751220dcdd2b9647a6e352bd86bd0ef1ef
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2039053
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66155}
Thibaud Michaud 2020-02-06 15:11:05 +01:00 committed by Commit Bot
parent 1d2c043aaa
commit 9c44330d86
3 changed files with 39 additions and 21 deletions


@@ -1505,6 +1505,8 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
std::shared_ptr<Counters> counters,
AccountingAllocator* allocator);
~AsyncStreamingProcessor();
bool ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) override;
@@ -2116,15 +2118,19 @@ AsyncStreamingProcessor::AsyncStreamingProcessor(
async_counters_(async_counters),
allocator_(allocator) {}
AsyncStreamingProcessor::~AsyncStreamingProcessor() {
if (job_->native_module_ && job_->native_module_->wire_bytes().empty()) {
// Clean up the temporary cache entry.
job_->isolate_->wasm_engine()->StreamingCompilationFailed(prefix_hash_);
}
}
void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
const WasmError& error) {
DCHECK(error.has_error());
// Make sure all background tasks stopped executing before we change the state
// of the AsyncCompileJob to DecodeFail.
job_->background_task_manager_.CancelAndWait();
if (!prefix_cache_hit_ && job_->native_module_) {
job_->isolate_->wasm_engine()->StreamingCompilationFailed(prefix_hash_);
}
// Check if there is already a CompiledModule, in which case we have to clean
// up the CompilationStateImpl as well.
@@ -2168,7 +2174,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
// compilation_unit_builder_ anymore.
CommitCompilationUnits();
compilation_unit_builder_.reset();
} else {
} else if (!prefix_cache_hit_) {
// Combine section hashes until code section.
prefix_hash_ = base::hash_combine(prefix_hash_,
NativeModuleCache::WireBytesHash(bytes));
@@ -2321,6 +2327,7 @@ void AsyncStreamingProcessor::OnFinishedChunk() {
// Finish the processing of the stream.
void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
TRACE_STREAMING("Finish stream...\n");
DCHECK_EQ(NativeModuleCache::PrefixHash(bytes.as_vector()), prefix_hash_);
ModuleResult result = decoder_.FinishDecoding(false);
if (result.failed()) {
FinishAsyncCompileJobWithError(result.error());

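Taken together, the hunks above make the streaming processor responsible for the prefix entry it reserved in the cache: the destructor calls StreamingCompilationFailed() when a native module exists but its wire bytes were never set (compilation did not finish), and section hashing only continues as long as there was no prefix cache hit. A minimal sketch of that lifecycle, with Cache, Job and StreamingProcessor as simplified stand-ins rather than the real V8 classes:

// Illustrative sketch only; not the V8 implementation.
#include <cstddef>
#include <cstdint>

struct Cache {
  // Stand-in for NativeModuleCache::StreamingCompilationFailed().
  void StreamingCompilationFailed(size_t /*prefix_hash*/) {}
};

struct Job {
  Cache* cache = nullptr;
  bool has_native_module = false;
  bool wire_bytes_committed = false;  // stands in for "wire_bytes() not empty"
};

class StreamingProcessor {
 public:
  explicit StreamingProcessor(Job* job) : job_(job) {}

  ~StreamingProcessor() {
    // Mirrors the destructor hunk: a native module with empty wire bytes
    // means compilation never finished, so the prefix entry reserved in the
    // cache has to be released here.
    if (job_->has_native_module && !job_->wire_bytes_committed) {
      job_->cache->StreamingCompilationFailed(prefix_hash_);
    }
  }

  void ProcessSection(const uint8_t* /*data*/, size_t size) {
    // Mirrors the "} else if (!prefix_cache_hit_)" change: once a cached
    // prefix matched, there is no point in hashing further sections.
    if (prefix_cache_hit_) return;
    // Same spirit as base::hash_combine, not the identical formula.
    prefix_hash_ ^= size + 0x9e3779b9 + (prefix_hash_ << 6) + (prefix_hash_ >> 2);
  }

 private:
  Job* job_;
  size_t prefix_hash_ = 0;
  bool prefix_cache_hit_ = false;
};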

@@ -152,6 +152,7 @@ std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
}
if (it->second.has_value()) {
if (auto shared_native_module = it->second.value().lock()) {
DCHECK_EQ(shared_native_module->wire_bytes(), wire_bytes);
return shared_native_module;
}
}
@@ -163,15 +164,21 @@ bool NativeModuleCache::GetStreamingCompilationOwnership(size_t prefix_hash) {
base::MutexGuard lock(&mutex_);
auto it = map_.lower_bound(Key{prefix_hash, {}});
if (it != map_.end() && it->first.prefix_hash == prefix_hash) {
DCHECK_IMPLIES(!it->first.bytes.empty(),
PrefixHash(it->first.bytes) == prefix_hash);
return false;
}
Key key{prefix_hash, {}};
return map_.emplace(key, base::nullopt).second;
DCHECK_EQ(0, map_.count(key));
map_.emplace(key, base::nullopt);
return true;
}
void NativeModuleCache::StreamingCompilationFailed(size_t prefix_hash) {
base::MutexGuard lock(&mutex_);
map_.erase({prefix_hash, {}});
Key key{prefix_hash, {}};
DCHECK_EQ(1, map_.count(key));
map_.erase(key);
cache_cv_.NotifyAll();
}
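The new DCHECKs in these two functions encode the placeholder protocol: the first streaming compilation for a prefix inserts an empty entry keyed by {prefix_hash, {}} and thereby claims ownership, so the emplace cannot collide, and a later StreamingCompilationFailed() from that owner must find exactly that one placeholder. A minimal sketch of the same idea, keyed on the hash alone and using std::optional/std::weak_ptr instead of the real cache types:

// Illustrative sketch only; the real cache keys on {prefix_hash, wire_bytes}.
#include <cassert>
#include <cstddef>
#include <map>
#include <memory>
#include <optional>

struct NativeModule {};  // stand-in

class PrefixCache {
 public:
  // True iff the caller is the first streaming compilation for this prefix
  // and therefore owns the placeholder entry it just inserted.
  bool GetStreamingCompilationOwnership(size_t prefix_hash) {
    if (map_.count(prefix_hash) != 0) return false;  // someone else owns it
    map_.emplace(prefix_hash, std::nullopt);         // cannot collide here
    return true;
  }

  // Only the owner calls this, so exactly one placeholder must exist.
  void StreamingCompilationFailed(size_t prefix_hash) {
    assert(map_.count(prefix_hash) == 1);
    map_.erase(prefix_hash);
  }

 private:
  std::map<size_t, std::optional<std::weak_ptr<NativeModule>>> map_;
};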
@@ -180,6 +187,7 @@ std::shared_ptr<NativeModule> NativeModuleCache::Update(
DCHECK_NOT_NULL(native_module);
if (native_module->module()->origin != kWasmOrigin) return native_module;
Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
DCHECK(!wire_bytes.empty());
size_t prefix_hash = PrefixHash(native_module->wire_bytes());
base::MutexGuard lock(&mutex_);
map_.erase(Key{prefix_hash, {}});
@@ -189,13 +197,13 @@ std::shared_ptr<NativeModule> NativeModuleCache::Update(
if (it->second.has_value()) {
auto conflicting_module = it->second.value().lock();
if (conflicting_module != nullptr) {
DCHECK_EQ(conflicting_module->wire_bytes(), wire_bytes);
return conflicting_module;
}
}
map_.erase(it);
}
if (!error) {
DCHECK_LT(0, native_module->wire_bytes().length());
// The key now points to the new native module's owned copy of the bytes,
// so that it stays valid until the native module is freed and erased from
// the map.
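The comment above describes an aliasing trick rather than a copy: the cache key for a finished module is a view into the wire bytes owned by that NativeModule, so the key remains valid exactly as long as the module is alive, and Erase() drops the entry when the module dies. A small sketch of the same ownership pattern, with a hypothetical Module type and std::string_view keys standing in for Vector<const uint8_t>:

// Illustrative sketch only; Module, Insert and Erase are hypothetical.
#include <map>
#include <memory>
#include <string>
#include <string_view>

struct Module {
  std::string wire_bytes;  // the module owns its copy of the bytes
};

// Keys are views into the bytes owned by the cached modules themselves, so no
// second copy is kept; an entry must be erased before its module is freed.
std::map<std::string_view, std::weak_ptr<Module>> cache;

void Insert(const std::shared_ptr<Module>& module) {
  cache.emplace(std::string_view(module->wire_bytes), module);
}

void Erase(Module* module) {
  cache.erase(std::string_view(module->wire_bytes));
}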
@@ -211,20 +219,11 @@ std::shared_ptr<NativeModule> NativeModuleCache::Update(
void NativeModuleCache::Erase(NativeModule* native_module) {
if (native_module->module()->origin != kWasmOrigin) return;
// Happens in some tests where bytes are set directly.
if (native_module->wire_bytes().length() == 0) return;
if (native_module->wire_bytes().empty()) return;
base::MutexGuard lock(&mutex_);
size_t prefix_hash = PrefixHash(native_module->wire_bytes());
auto cache_it = map_.find(Key{prefix_hash, native_module->wire_bytes()});
// Not all native modules are stored in the cache currently. In particular
// streaming compilation and asmjs compilation results are not. So make
// sure that we only delete existing and expired entries.
// Do not erase {nullopt} values either, as they indicate that the
// {NativeModule} is currently being created in another thread.
if (cache_it != map_.end() && cache_it->second.has_value() &&
cache_it->second.value().expired()) {
map_.erase(cache_it);
cache_cv_.NotifyAll();
}
map_.erase(Key{prefix_hash, native_module->wire_bytes()});
cache_cv_.NotifyAll();
}
// static
@@ -246,12 +245,17 @@ size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
section_id = static_cast<SectionCode>(decoder.consume_u8());
uint32_t section_size = decoder.consume_u32v("section size");
if (section_id == SectionCode::kCodeSectionCode) {
hash = base::hash_combine(hash, section_size);
uint32_t num_functions = decoder.consume_u32v("num functions");
// If {num_functions} is 0, the streaming decoder skips the section. Do
// the same here to ensure hashes are consistent.
if (num_functions != 0) {
hash = base::hash_combine(hash, section_size);
}
break;
}
const uint8_t* payload_start = decoder.pc();
// TODO(v8:10126): Remove this check, bytes have been validated already.
if (decoder.position() + section_size >= wire_bytes.size()) {
if (decoder.position() + section_size > wire_bytes.size()) {
return hash;
}
decoder.consume_bytes(section_size, "section payload");
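The PrefixHash() hunk fixes two consistency issues: an empty code section must not contribute to the hash, because the streaming decoder skips such a section entirely, and a section whose payload ends exactly at the end of the wire bytes is still valid, so the bounds check has to use > rather than >=. A rough sketch of that loop over a flat byte buffer, with hand-rolled helpers (ConsumeU32V, Combine) standing in for the V8 decoder and base::hash_combine:

// Illustrative sketch only; assumes well-formed input.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr uint8_t kCodeSectionCode = 10;  // code section id in the wasm format

// Minimal LEB128 reader.
static uint32_t ConsumeU32V(const std::vector<uint8_t>& bytes, size_t* pos) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t b;
  do {
    b = bytes[(*pos)++];
    result |= static_cast<uint32_t>(b & 0x7f) << shift;
    shift += 7;
  } while (b & 0x80);
  return result;
}

static size_t Combine(size_t seed, size_t value) {
  // Same spirit as base::hash_combine, not the identical formula.
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

size_t PrefixHashSketch(const std::vector<uint8_t>& wire_bytes) {
  size_t hash = 0;
  size_t pos = 8;  // skip the magic number and version
  while (pos < wire_bytes.size()) {
    uint8_t section_id = wire_bytes[pos++];
    uint32_t section_size = ConsumeU32V(wire_bytes, &pos);
    if (section_id == kCodeSectionCode) {
      uint32_t num_functions = ConsumeU32V(wire_bytes, &pos);
      // An empty code section is skipped by the streaming decoder, so it must
      // not influence the prefix hash either.
      if (num_functions != 0) hash = Combine(hash, section_size);
      break;
    }
    // A payload ending exactly at the end of the buffer is still valid; only
    // an overrunning payload stops hashing early (the ">" vs ">=" fix).
    if (pos + section_size > wire_bytes.size()) return hash;
    hash = Combine(hash, section_size);  // payload hashing omitted for brevity
    pos += section_size;
  }
  return hash;
}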
@@ -359,6 +363,8 @@ WasmEngine::~WasmEngine() {
DCHECK(isolates_.empty());
// All NativeModules did die.
DCHECK(native_modules_.empty());
// Native module cache does not leak.
DCHECK(native_module_cache_.empty());
}
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,


@@ -67,6 +67,8 @@ class NativeModuleCache {
bool operator<(const Key& other) const {
if (prefix_hash != other.prefix_hash) {
DCHECK_IMPLIES(!bytes.empty() && !other.bytes.empty(),
bytes != other.bytes);
return prefix_hash < other.prefix_hash;
}
if (bytes.size() != other.bytes.size()) {
@@ -75,6 +77,7 @@ class NativeModuleCache {
// Fast path when the base pointers are the same.
// Also handles the {nullptr} case which would be UB for memcmp.
if (bytes.begin() == other.bytes.begin()) {
DCHECK_EQ(prefix_hash, other.prefix_hash);
return false;
}
DCHECK_NOT_NULL(bytes.begin());
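The DCHECKs added to Key::operator< spell out the comparator's invariants: keys with different prefix hashes may only have equal bytes when one of them is an empty placeholder, and identical base pointers imply identical prefix hashes. The ordering itself is prefix hash first, then byte length, then content, which is what lets lower_bound(Key{prefix_hash, {}}) find the first entry for a given prefix. A compact sketch of that ordering, with std::string_view standing in for Vector<const uint8_t>:

// Illustrative sketch only.
#include <cassert>
#include <cstddef>
#include <cstring>
#include <string_view>

struct Key {
  size_t prefix_hash;
  std::string_view bytes;

  bool operator<(const Key& other) const {
    if (prefix_hash != other.prefix_hash) return prefix_hash < other.prefix_hash;
    if (bytes.size() != other.bytes.size()) return bytes.size() < other.bytes.size();
    // Same base pointer (including the nullptr of an empty placeholder key)
    // means equal; calling memcmp on nullptr would be undefined behavior.
    if (bytes.data() == other.bytes.data()) return false;
    assert(bytes.data() != nullptr && other.bytes.data() != nullptr);
    return std::memcmp(bytes.data(), other.bytes.data(), bytes.size()) < 0;
  }
};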
@@ -91,6 +94,8 @@ class NativeModuleCache {
std::shared_ptr<NativeModule> native_module, bool error);
void Erase(NativeModule* native_module);
bool empty() { return map_.empty(); }
static size_t WireBytesHash(Vector<const uint8_t> bytes);
// Hash the wire bytes up to the code section header. Used as a heuristic to