Revert "[wasm] Compile debug code lazily"

This reverts commit 7b138dd30d.

Reason for revert: Causes multiple flakes:
https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20TSAN%20-%20isolates/22932/overview
https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux%20-%20debug/41934/overview

Original change's description:
> [wasm] Compile debug code lazily
>
> Currently V8 recompiles all functions of a WebAssembly module when a
> debugging session starts. This is outdated behavior and
> causes OOMs for developers. With this CL all compiled code just gets
> removed when a debugging session starts, and debugging code gets
> compiled lazily.
>
> This behavior may lead to small delays whenever a new function gets
> entered by the debugger. However, developers are used to debugging code
> being slightly slower, and the small delays should be on the order of
> a few milliseconds. On the other hand, debug modules can be big,
> sometimes containing more than 1'000'000 functions, and developers reported
> OOMs when debugging.
>
> R=clemensb@chromium.org
>
> Bug: v8:13541, chromium:1372621, v8:13224
> Change-Id: Ia36d9b8743523b1c89221c59f989268e27f6ce98
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4067302
> Reviewed-by: Kim-Anh Tran <kimanh@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Andreas Haas <ahaas@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#84662}

Bug: v8:13541, chromium:1372621, v8:13224
Change-Id: Ic5442462d158618f2d43b8e0ebdfb90017ed378a
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4080034
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Auto-Submit: Michael Achenbach <machenbach@chromium.org>
Owners-Override: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84665}
Authored by Michael Achenbach on 2022-12-05 18:53:05 +00:00; committed by V8 LUCI CQ.
parent 3573da0bc8
commit 5073ba7d52
31 changed files with 402 additions and 168 deletions
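
For orientation: the reverted CL dropped all compiled code when a debug session started and compiled debug code lazily on first entry into a function; this revert restores the old behavior, where entering a debug session pins both tiers to Liftoff and eagerly recompiles the whole module. A minimal sketch of the restored tier selection, using simplified stand-in types (Tier, TierPair, and GetDefaultTiers are illustrative, not the actual V8 API):

    #include <cstdio>

    enum class Tier { kLiftoff, kTurbofan };
    struct TierPair { Tier baseline; Tier top; };

    // Restored behavior: a tiered-down (debugged) module keeps every
    // function on Liftoff; otherwise Liftoff code may tier up to TurboFan.
    TierPair GetDefaultTiers(bool tiered_down) {
      if (tiered_down) return {Tier::kLiftoff, Tier::kLiftoff};
      return {Tier::kLiftoff, Tier::kTurbofan};
    }

    int main() {
      TierPair debug_tiers = GetDefaultTiers(/*tiered_down=*/true);
      std::printf("debug code stays on Liftoff: %d\n",
                  debug_tiers.baseline == Tier::kLiftoff &&
                      debug_tiers.top == Tier::kLiftoff);
    }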


@@ -8273,7 +8273,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
wasm::ExecutionTier::kNone, wasm::kNoDebugging);
published_code = native_module->PublishCode(std::move(wasm_code));
}
return published_code;
@@ -8326,7 +8326,7 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), wasm::WasmCode::kWasmToJsWrapper,
wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
wasm::ExecutionTier::kNone, wasm::kNoDebugging);
return native_module->PublishCode(std::move(wasm_code));
}
}


@@ -958,9 +958,9 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
}
#if V8_ENABLE_WEBASSEMBLY
void EnterDebuggingForIsolate(Isolate* v8_isolate) {
void TierDownAllModulesPerIsolate(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::wasm::GetWasmEngine()->EnterDebuggingForIsolate(isolate);
i::wasm::GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
}
void LeaveDebuggingForIsolate(Isolate* v8_isolate) {


@@ -315,7 +315,7 @@ V8_EXPORT_PRIVATE void SetDebugDelegate(Isolate* isolate,
DebugDelegate* listener);
#if V8_ENABLE_WEBASSEMBLY
V8_EXPORT_PRIVATE void EnterDebuggingForIsolate(Isolate* isolate);
V8_EXPORT_PRIVATE void TierDownAllModulesPerIsolate(Isolate* isolate);
V8_EXPORT_PRIVATE void LeaveDebuggingForIsolate(Isolate* isolate);
#endif // V8_ENABLE_WEBASSEMBLY


@@ -395,7 +395,7 @@ GdbServer::DebugDelegate::DebugDelegate(Isolate* isolate, GdbServer* gdb_server)
// Register the delegate
isolate_->debug()->SetDebugDelegate(this);
v8::debug::EnterDebuggingForIsolate((v8::Isolate*)isolate_);
v8::debug::TierDownAllModulesPerIsolate((v8::Isolate*)isolate_);
v8::debug::ChangeBreakOnException((v8::Isolate*)isolate_,
v8::debug::BreakOnUncaughtException);
}


@@ -102,7 +102,7 @@ void V8Debugger::enable() {
v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
#if V8_ENABLE_WEBASSEMBLY
v8::debug::EnterDebuggingForIsolate(m_isolate);
v8::debug::TierDownAllModulesPerIsolate(m_isolate);
#endif // V8_ENABLE_WEBASSEMBLY
}


@@ -432,30 +432,13 @@ RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WasmEnterDebugging) {
RUNTIME_FUNCTION(Runtime_WasmTierDown) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
wasm::GetWasmEngine()->EnterDebuggingForIsolate(isolate);
wasm::GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_IsWasmDebugFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSFunction> function = args.at<JSFunction>(0);
CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
Handle<WasmExportedFunction> exp_fun =
Handle<WasmExportedFunction>::cast(function);
wasm::NativeModule* native_module =
exp_fun->instance().module_object().native_module();
uint32_t func_index = exp_fun->function_index();
wasm::WasmCodeRefScope code_ref_scope;
wasm::WasmCode* code = native_module->GetCode(func_index);
return isolate->heap()->ToBoolean(
code && code->is_liftoff() &&
(code->for_debugging() != wasm::kNotForDebugging));
}
RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());


@@ -654,7 +654,6 @@ namespace internal {
F(IsAsmWasmCode, 1, 1) \
F(IsLiftoffFunction, 1, 1) \
F(IsTurboFanFunction, 1, 1) \
F(IsWasmDebugFunction, 1, 1) \
F(IsThreadInWasm, 0, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
@@ -663,7 +662,7 @@ namespace internal {
F(SetWasmInstantiateControls, 0, 1) \
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumCodeSpaces, 1, 1) \
F(WasmEnterDebugging, 0, 1) \
F(WasmTierDown, 0, 1) \
F(WasmTierUpFunction, 2, 1) \
F(WasmTraceEnter, 0, 1) \
F(WasmTraceExit, 1, 1) \


@@ -775,7 +775,7 @@ class LiftoffCompiler {
// overflows in the budget calculation.
DCHECK_LE(1, budget_used);
if (for_debugging_ != kNotForDebugging) return;
if (for_debugging_ != kNoDebugging) return;
CODE_COMMENT("tierup check");
// We never want to blow the entire budget at once.
const int kMax = v8_flags.wasm_tiering_budget / 4;
@@ -857,7 +857,7 @@
}
bool dynamic_tiering() {
return env_->dynamic_tiering && for_debugging_ == kNotForDebugging &&
return env_->dynamic_tiering && for_debugging_ == kNoDebugging &&
(v8_flags.wasm_tier_up_filter == -1 ||
v8_flags.wasm_tier_up_filter == func_index_);
}
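
The tier-up check above drives dynamic tiering: generated code periodically reports how much budget it used, and a function is tiered up to TurboFan once its budget is exhausted. A standalone sketch of that accounting (the budget constant and TriggerTierUp are assumptions for illustration, not V8's actual values):

    #include <algorithm>
    #include <cstdio>

    constexpr int kInitialBudget = 1'800'000;  // assumed starting budget
    constexpr int kMax = kInitialBudget / 4;   // never blow the entire budget at once

    int budget = kInitialBudget;

    void TriggerTierUp() { std::printf("tier up to TurboFan\n"); }

    // Called from compiled code with the budget used since the last check;
    // capping at kMax keeps one hot region from draining it all instantly.
    void TierupCheck(int budget_used) {
      budget -= std::min(budget_used, kMax);
      if (budget <= 0) TriggerTierUp();
    }

    int main() {
      for (int i = 0; i < 5; ++i) TierupCheck(2'000'000);
    }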


@@ -56,7 +56,7 @@ enum LiftoffBailoutReason : int8_t {
struct LiftoffOptions {
int func_index = -1;
ForDebugging for_debugging = kNotForDebugging;
ForDebugging for_debugging = kNoDebugging;
Counters* counters = nullptr;
AssemblerBufferCache* assembler_buffer_cache = nullptr;
WasmFeatures* detected_features = nullptr;


@@ -124,6 +124,7 @@ enum class CompilationEvent : uint8_t {
kFinishedExportWrappers,
kFinishedCompilationChunk,
kFailedCompilation,
kFinishedRecompilation
};
class V8_EXPORT_PRIVATE CompilationEventCallback {
@@ -174,6 +175,7 @@ class V8_EXPORT_PRIVATE CompilationState {
bool failed() const;
bool baseline_compilation_finished() const;
bool recompilation_finished() const;
void set_compilation_id(int compilation_id);


@@ -164,7 +164,7 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
DCHECK_LE(native_module->num_imported_functions(), function->func_index);
DCHECK_LT(function->func_index, native_module->num_functions());
WasmCompilationUnit unit(function->func_index, tier, kNotForDebugging);
WasmCompilationUnit unit(function->func_index, tier, kNoDebugging);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage().get(),


@@ -56,16 +56,13 @@ struct WasmCompilationResult {
ExecutionTier requested_tier;
ExecutionTier result_tier;
Kind kind = kFunction;
ForDebugging for_debugging = kNotForDebugging;
ForDebugging for_debugging = kNoDebugging;
};
class V8_EXPORT_PRIVATE WasmCompilationUnit final {
public:
WasmCompilationUnit(int index, ExecutionTier tier, ForDebugging for_debugging)
: func_index_(index), tier_(tier), for_debugging_(for_debugging) {
DCHECK_IMPLIES(for_debugging != ForDebugging::kNotForDebugging,
tier_ == ExecutionTier::kLiftoff);
}
: func_index_(index), tier_(tier), for_debugging_(for_debugging) {}
WasmCompilationResult ExecuteCompilation(CompilationEnv*,
const WireBytesStorage*, Counters*,


@@ -575,6 +575,8 @@ class CompilationStateImpl {
int num_export_wrappers,
ProfileInformation* pgo_info);
// Initialize the compilation progress after deserialization. This is needed
// for recompilation (e.g. for tier down) to work later.
void InitializeCompilationProgressAfterDeserialization(
base::Vector<const int> lazy_functions,
base::Vector<const int> eager_functions);
@@ -589,6 +591,14 @@ class CompilationStateImpl {
// equivalent to {InitializeCompilationUnits}.
void AddCompilationUnit(CompilationUnitBuilder* builder, int func_index);
// Initialize recompilation of the whole module: Setup compilation progress
// for recompilation and add the respective compilation units. The callback is
// called immediately if no recompilation is needed, or called later
// otherwise.
void InitializeRecompilation(TieringState new_tiering_state,
std::unique_ptr<CompilationEventCallback>
recompilation_finished_callback);
// Add the callback to be called on compilation events. Needs to be
// set before {CommitCompilationUnits} is run to ensure that it receives all
// events. The callback object must support being deleted from any thread.
@@ -641,6 +651,11 @@ class CompilationStateImpl {
outstanding_export_wrappers_ == 0;
}
bool recompilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
return outstanding_recompilation_functions_ == 0;
}
DynamicTiering dynamic_tiering() const { return dynamic_tiering_; }
Counters* counters() const { return async_counters_.get(); }
@@ -755,6 +770,9 @@ class CompilationStateImpl {
size_t bytes_since_last_chunk_ = 0;
std::vector<uint8_t> compilation_progress_;
int outstanding_recompilation_functions_ = 0;
TieringState tiering_state_ = kTieredUp;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -767,6 +785,7 @@ class CompilationStateImpl {
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;
using MissingRecompilationField = base::BitField8<bool, 6, 1>;
};
CompilationStateImpl* Impl(CompilationState* compilation_state) {
@@ -849,6 +868,10 @@ bool CompilationState::baseline_compilation_finished() const {
return Impl(this)->baseline_compilation_finished();
}
bool CompilationState::recompilation_finished() const {
return Impl(this)->recompilation_finished();
}
void CompilationState::set_compilation_id(int compilation_id) {
Impl(this)->set_compilation_id(compilation_id);
}
@@ -922,19 +945,15 @@ struct ExecutionTierPair {
ExecutionTier top_tier;
};
// Pass the debug state as a separate parameter to avoid data races: the debug
// state may change between its use here and its use at the call site. To have
// a consistent view on the debug state, the caller reads the debug state once
// and then passes it to this function.
ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
DynamicTiering dynamic_tiering,
DebugState is_in_debug_state,
bool lazy_module) {
const WasmModule* module = native_module->module();
if (is_asmjs_module(module)) {
return {ExecutionTier::kTurbofan, ExecutionTier::kTurbofan};
}
if (is_in_debug_state) {
// TODO(13224): Use lazy compilation for debug code.
if (native_module->IsTieredDown()) {
return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff};
}
if (lazy_module) {
@@ -949,17 +968,14 @@ ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
}
ExecutionTierPair GetLazyCompilationTiers(NativeModule* native_module,
uint32_t func_index,
DebugState is_in_debug_state) {
uint32_t func_index) {
DynamicTiering dynamic_tiering =
Impl(native_module->compilation_state())->dynamic_tiering();
// For lazy compilation, get the tiers we would use if lazy compilation is
// disabled.
constexpr bool kNotLazy = false;
ExecutionTierPair tiers = GetDefaultTiersPerModule(
native_module, dynamic_tiering, is_in_debug_state, kNotLazy);
// If we are in debug mode, we ignore compilation hints.
if (is_in_debug_state) return tiers;
ExecutionTierPair tiers =
GetDefaultTiersPerModule(native_module, dynamic_tiering, kNotLazy);
// Check if compilation hints override default tiering behaviour.
if (native_module->enabled_features().has_compilation_hints()) {
@@ -996,7 +1012,7 @@ class CompilationUnitBuilder {
void AddImportUnit(uint32_t func_index) {
DCHECK_GT(native_module_->module()->num_imported_functions, func_index);
baseline_units_.emplace_back(func_index, ExecutionTier::kNone,
kNotForDebugging);
kNoDebugging);
}
void AddJSToWasmWrapperUnit(
@@ -1005,11 +1021,11 @@
}
void AddBaselineUnit(int func_index, ExecutionTier tier) {
baseline_units_.emplace_back(func_index, tier, kNotForDebugging);
baseline_units_.emplace_back(func_index, tier, kNoDebugging);
}
void AddTopTierUnit(int func_index, ExecutionTier tier) {
tiering_units_.emplace_back(func_index, tier, kNotForDebugging);
tiering_units_.emplace_back(func_index, tier, kNoDebugging);
}
void AddDebugUnit(int func_index) {
@@ -1017,6 +1033,13 @@
kForDebugging);
}
void AddRecompilationUnit(int func_index, ExecutionTier tier) {
// For recompilation, just treat all units like baseline units.
baseline_units_.emplace_back(
func_index, tier,
tier == ExecutionTier::kLiftoff ? kForDebugging : kNoDebugging);
}
bool Commit() {
if (baseline_units_.empty() && tiering_units_.empty() &&
js_to_wasm_wrapper_units_.empty()) {
@@ -1116,15 +1139,12 @@ bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
DebugState is_in_debug_state = native_module->IsInDebugState();
ExecutionTierPair tiers =
GetLazyCompilationTiers(native_module, func_index, is_in_debug_state);
ExecutionTierPair tiers = GetLazyCompilationTiers(native_module, func_index);
DCHECK_LE(native_module->num_imported_functions(), func_index);
DCHECK_LT(func_index, native_module->num_functions());
WasmCompilationUnit baseline_unit{
func_index, tiers.baseline_tier,
is_in_debug_state ? kForDebugging : kNotForDebugging};
WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier,
kNoDebugging};
CompilationEnv env = native_module->CreateCompilationEnv();
// TODO(wasm): Use an assembler buffer cache for lazy compilation.
AssemblerBufferCache* assembler_buffer_cache = nullptr;
@@ -1171,8 +1191,7 @@ bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
if (GetCompileStrategy(module, native_module->enabled_features(), func_index,
lazy_module) == CompileStrategy::kLazy &&
tiers.baseline_tier < tiers.top_tier) {
WasmCompilationUnit tiering_unit{func_index, tiers.top_tier,
kNotForDebugging};
WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging};
compilation_state->CommitTopTierCompilationUnit(tiering_unit);
}
return true;
@@ -1383,7 +1402,7 @@ void TriggerTierUp(WasmInstanceObject instance, int func_index) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
WasmCompilationUnit tiering_unit{func_index, ExecutionTier::kTurbofan,
kNotForDebugging};
kNoDebugging};
const WasmModule* module = native_module->module();
int priority;
@@ -2000,6 +2019,44 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
return native_module;
}
void RecompileNativeModule(NativeModule* native_module,
TieringState tiering_state) {
// Install a callback to notify us once background recompilation finished.
auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
auto* compilation_state = Impl(native_module->compilation_state());
class RecompilationFinishedCallback : public CompilationEventCallback {
public:
explicit RecompilationFinishedCallback(
std::shared_ptr<base::Semaphore> recompilation_finished_semaphore)
: recompilation_finished_semaphore_(
std::move(recompilation_finished_semaphore)) {}
void call(CompilationEvent event) override {
DCHECK_NE(CompilationEvent::kFailedCompilation, event);
if (event == CompilationEvent::kFinishedRecompilation) {
recompilation_finished_semaphore_->Signal();
}
}
private:
std::shared_ptr<base::Semaphore> recompilation_finished_semaphore_;
};
// The callback captures a shared ptr to the semaphore.
// Initialize the compilation units and kick off background compile tasks.
compilation_state->InitializeRecompilation(
tiering_state, std::make_unique<RecompilationFinishedCallback>(
recompilation_finished_semaphore));
constexpr JobDelegate* kNoDelegate = nullptr;
ExecuteCompilationUnits(compilation_state->native_module_weak(),
compilation_state->counters(), kNoDelegate,
kBaselineOnly);
recompilation_finished_semaphore->Wait();
DCHECK(!compilation_state->failed());
}
AsyncCompileJob::AsyncCompileJob(
Isolate* isolate, WasmFeatures enabled_features,
base::OwnedVector<const uint8_t> bytes, Handle<Context> context,
@@ -2308,10 +2365,11 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
// We can only update the feature counts once the entire compile is done.
compilation_state->PublishDetectedFeatures(isolate_);
// We might need debug code for the module, if the debugger was enabled while
// streaming compilation was running. Since handling this while compiling via
// streaming is tricky, we just tier down now, before publishing the module.
if (native_module_->IsInDebugState()) native_module_->RemoveAllCompiledCode();
// We might need to recompile the module for debugging, if the debugger was
// enabled while streaming compilation was running. Since handling this while
// compiling via streaming is tricky, we just tier down now, before publishing
// the module.
if (native_module_->IsTieredDown()) native_module_->RecompileForTiering();
// Finally, log all generated code (it does not matter if this happens
// repeatedly in case the script is shared).
@@ -2379,6 +2437,10 @@ class AsyncCompileJob::CompilationStateCallback
job_->DoSync<Fail>();
}
break;
case CompilationEvent::kFinishedRecompilation:
// This event can happen out of order, hence don't remember this in
// {last_event_}.
return;
}
#ifdef DEBUG
last_event_ = event;
@@ -3138,8 +3200,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
// Compute the default compilation progress for all functions, and set it.
const ExecutionTierPair default_tiers = GetDefaultTiersPerModule(
native_module_, dynamic_tiering_, native_module_->IsInDebugState(),
IsLazyModule(module));
native_module_, dynamic_tiering_, IsLazyModule(module));
const uint8_t default_progress =
RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
RequiredTopTierField::encode(default_tiers.top_tier) |
@@ -3219,7 +3280,7 @@ uint8_t CompilationStateImpl::AddCompilationUnitInternal(
void CompilationStateImpl::InitializeCompilationUnits(
std::unique_ptr<CompilationUnitBuilder> builder) {
int offset = native_module_->module()->num_imported_functions;
if (native_module_->IsInDebugState()) {
if (native_module_->IsTieredDown()) {
for (size_t i = 0; i < compilation_progress_.size(); ++i) {
int func_index = offset + static_cast<int>(i);
builder->AddDebugUnit(func_index);
@@ -3239,6 +3300,10 @@
void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
int func_index) {
if (native_module_->IsTieredDown()) {
builder->AddDebugUnit(func_index);
return;
}
int offset = native_module_->module()->num_imported_functions;
int progress_index = func_index - offset;
uint8_t function_progress;
@@ -3307,8 +3372,7 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
// Update compilation state for eagerly compiled functions.
constexpr bool kNotLazy = false;
ExecutionTierPair default_tiers =
GetDefaultTiersPerModule(native_module_, dynamic_tiering_,
native_module_->IsInDebugState(), kNotLazy);
GetDefaultTiersPerModule(native_module_, dynamic_tiering_, kNotLazy);
uint8_t progress_for_eager_functions =
RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
RequiredTopTierField::encode(default_tiers.top_tier) |
@@ -3337,6 +3401,87 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
WaitForCompilationEvent(CompilationEvent::kFinishedBaselineCompilation);
}
void CompilationStateImpl::InitializeRecompilation(
TieringState new_tiering_state,
std::unique_ptr<CompilationEventCallback> recompilation_finished_callback) {
DCHECK(!failed());
// Hold the mutex as long as possible, to synchronize between multiple
// recompilations that are triggered at the same time (e.g. when the profiler
// is disabled).
base::Optional<base::MutexGuard> guard(&callbacks_mutex_);
// As long as there are outstanding recompilation functions, take part in
// compilation. This is to avoid recompiling for the same tier or for
// different tiers concurrently. Note that the compilation unit queues can run
// empty before {outstanding_recompilation_functions_} drops to zero. In this
// case, we do not wait for the last running compilation threads to finish
// their units, but just start our own recompilation already.
while (outstanding_recompilation_functions_ > 0 &&
compilation_unit_queues_.GetTotalSize() > 0) {
guard.reset();
constexpr JobDelegate* kNoDelegate = nullptr;
ExecuteCompilationUnits(native_module_weak_, async_counters_.get(),
kNoDelegate, kBaselineOrTopTier);
guard.emplace(&callbacks_mutex_);
}
// Information about compilation progress is shared between this class and the
// NativeModule. Before updating information here, consult the NativeModule to
// find all functions that need recompilation.
// Since the current tiering state is updated on the NativeModule before
// triggering recompilation, it's OK if the information is slightly outdated.
// If we compile functions twice, the NativeModule will ignore all redundant
// code (or code compiled for the wrong tier).
std::vector<int> recompile_function_indexes =
native_module_->FindFunctionsToRecompile(new_tiering_state);
callbacks_.emplace_back(std::move(recompilation_finished_callback));
tiering_state_ = new_tiering_state;
// If compilation progress is not initialized yet, then compilation didn't
// start yet, and new code will be kept tiered-down from the start. For
// streaming compilation, there is a special path to tier down later, when
// the module is complete. In any case, we don't need to recompile here.
base::Optional<CompilationUnitBuilder> builder;
if (compilation_progress_.size() > 0) {
builder.emplace(native_module_);
const WasmModule* module = native_module_->module();
DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
DCHECK_GE(module->num_declared_functions,
recompile_function_indexes.size());
outstanding_recompilation_functions_ =
static_cast<int>(recompile_function_indexes.size());
// Restart recompilation if another recompilation is already happening.
for (auto& progress : compilation_progress_) {
progress = MissingRecompilationField::update(progress, false);
}
auto new_tier = new_tiering_state == kTieredDown ? ExecutionTier::kLiftoff
: ExecutionTier::kTurbofan;
int imported = module->num_imported_functions;
// Generate necessary compilation units on the fly.
for (int function_index : recompile_function_indexes) {
DCHECK_LE(imported, function_index);
int slot_index = function_index - imported;
auto& progress = compilation_progress_[slot_index];
progress = MissingRecompilationField::update(progress, true);
builder->AddRecompilationUnit(function_index, new_tier);
}
}
// Trigger callback if module needs no recompilation.
if (outstanding_recompilation_functions_ == 0) {
TriggerCallbacks(base::EnumSet<CompilationEvent>(
{CompilationEvent::kFinishedRecompilation}));
}
if (builder.has_value()) {
// Avoid holding lock while scheduling a compile job.
guard.reset();
builder->Commit();
}
}
void CompilationStateImpl::AddCallback(
std::unique_ptr<CompilationEventCallback> callback) {
base::MutexGuard callbacks_guard(&callbacks_mutex_);
@@ -3498,6 +3643,25 @@ void CompilationStateImpl::OnFinishedUnits(
bytes_since_last_chunk_ += code->instructions().size();
}
if (V8_UNLIKELY(MissingRecompilationField::decode(function_progress))) {
DCHECK_LT(0, outstanding_recompilation_functions_);
// If tiering up, accept any TurboFan code. For tiering down, look at
// the {for_debugging} flag. The tier can be Liftoff or TurboFan and is
// irrelevant here. In particular, we want to ignore any outstanding
// non-debugging units.
bool matches = tiering_state_ == kTieredDown
? code->for_debugging()
: code->tier() == ExecutionTier::kTurbofan;
if (matches) {
outstanding_recompilation_functions_--;
compilation_progress_[slot_index] = MissingRecompilationField::update(
compilation_progress_[slot_index], false);
if (outstanding_recompilation_functions_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedRecompilation);
}
}
}
// Update function's compilation progress.
if (code->tier() > reached_tier) {
compilation_progress_[slot_index] = ReachedTierField::update(
@@ -3547,9 +3711,11 @@ void CompilationStateImpl::TriggerCallbacks(
// Don't trigger past events again.
triggered_events -= finished_events_;
// There can be multiple compilation chunks, thus do not store this.
finished_events_ |=
triggered_events - CompilationEvent::kFinishedCompilationChunk;
// Recompilation can happen multiple times, thus do not store this. There can
// also be multiple compilation chunks.
finished_events_ |= triggered_events -
CompilationEvent::kFinishedRecompilation -
CompilationEvent::kFinishedCompilationChunk;
for (auto event :
{std::make_pair(CompilationEvent::kFailedCompilation,
@@ -3559,7 +3725,9 @@
std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
"wasm.BaselineFinished"),
std::make_pair(CompilationEvent::kFinishedCompilationChunk,
"wasm.CompilationChunkFinished")}) {
"wasm.CompilationChunkFinished"),
std::make_pair(CompilationEvent::kFinishedRecompilation,
"wasm.RecompilationFinished")}) {
if (!triggered_events.contains(event.first)) continue;
DCHECK_NE(compilation_id_, kInvalidCompilationID);
TRACE_EVENT1("v8.wasm", event.second, "id", compilation_id_);
@@ -3568,7 +3736,8 @@
}
}
if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0) {
if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
outstanding_recompilation_functions_ == 0) {
auto new_end = std::remove_if(
callbacks_.begin(), callbacks_.end(), [](const auto& callback) {
return callback->release_after_final_event();
@@ -3881,7 +4050,7 @@ WasmCode* CompileImportWrapper(
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), GetCodeKind(result),
ExecutionTier::kNone, kNotForDebugging);
ExecutionTier::kNone, kNoDebugging);
published_code = native_module->PublishCode(std::move(wasm_code));
}
(*cache_scope)[key] = published_code;

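The restored RecompileNativeModule above blocks the calling thread until a compilation-event callback signals that background recompilation finished. Reduced to standard C++20 with stand-in names (the Event enum and the inline background work are placeholders for V8's CompilationEvent machinery), the synchronization pattern looks like this:

    #include <memory>
    #include <semaphore>
    #include <thread>

    enum class Event { kFinishedRecompilation, kOther };

    int main() {
      auto done = std::make_shared<std::binary_semaphore>(0);
      // The callback captures the semaphore by shared_ptr, so it stays valid
      // no matter which thread runs it or when.
      auto callback = [done](Event event) {
        if (event == Event::kFinishedRecompilation) done->release();
      };
      std::thread background([callback] {
        // ... recompile functions, then report completion ...
        callback(Event::kFinishedRecompilation);
      });
      done->acquire();  // Returns only once recompilation has finished.
      background.join();
    }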

@@ -61,6 +61,9 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
int compilation_id, v8::metrics::Recorder::ContextId context_id,
ProfileInformation* pgo_info);
void RecompileNativeModule(NativeModule* native_module,
TieringState new_tiering_state);
V8_EXPORT_PRIVATE
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module);


@@ -1144,7 +1144,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
source_pos.as_vector(), // source positions
WasmCode::kWasmFunction, // kind
ExecutionTier::kNone, // tier
kNotForDebugging}}; // for_debugging
kNoDebugging}}; // for_debugging
new_code->MaybePrint();
new_code->Validate();
@@ -1344,13 +1344,13 @@ WasmCode* NativeModule::PublishCodeLocked(
// code table of jump table). Otherwise, install code if it was compiled
// with a higher tier.
static_assert(
kForDebugging > kNotForDebugging && kWithBreakpoints > kForDebugging,
kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
"for_debugging is ordered");
const bool update_code_table =
// Never install stepping code.
code->for_debugging() != kForStepping &&
(!prior_code ||
(debug_state_ == kDebugging
(tiering_state_ == kTieredDown
// Tiered down: Install breakpoints over normal debug code.
? prior_code->for_debugging() <= code->for_debugging()
// Tiered up: Install if the tier is higher than before or we
@@ -1387,7 +1387,7 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
DCHECK_LT(code->index(), num_functions());
// If the module is tiered up by now, do not reinstall debug code.
if (debug_state_ != kDebugging) return;
if (tiering_state_ != kTieredDown) return;
uint32_t slot_idx = declared_function_index(module(), code->index());
if (WasmCode* prior_code = code_table_[slot_idx]) {
@@ -1422,13 +1422,13 @@ std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
base::Vector<const byte> reloc_info,
base::Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier) {
UpdateCodeSize(instructions.size(), tier, kNotForDebugging);
UpdateCodeSize(instructions.size(), tier, kNoDebugging);
return std::unique_ptr<WasmCode>{new WasmCode{
this, index, instructions, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, unpadded_binary_size, protected_instructions_data,
reloc_info, source_position_table, kind, tier, kNotForDebugging}};
reloc_info, source_position_table, kind, tier, kNoDebugging}};
}
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
@@ -1506,7 +1506,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
base::Vector<uint8_t> code_space =
code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNotForDebugging);
UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{
new WasmCode{this, // native_module
@@ -1524,13 +1524,13 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
{}, // source_pos
WasmCode::kJumpTable, // kind
ExecutionTier::kNone, // tier
kNotForDebugging}}; // for_debugging
kNoDebugging}}; // for_debugging
return PublishCodeLocked(std::move(code));
}
void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
ForDebugging for_debugging) {
if (for_debugging != kNotForDebugging) return;
if (for_debugging != kNoDebugging) return;
// Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
// this is shared code.
if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
@@ -2410,12 +2410,17 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
return generated_code;
}
void NativeModule::SetDebugState(DebugState new_debug_state) {
void NativeModule::SetTieringState(TieringState new_tiering_state) {
// Do not tier down asm.js (just never change the tiering state).
if (module()->origin != kWasmOrigin) return;
base::RecursiveMutexGuard lock(&allocation_mutex_);
debug_state_ = new_debug_state;
tiering_state_ = new_tiering_state;
}
bool NativeModule::IsTieredDown() {
base::RecursiveMutexGuard lock(&allocation_mutex_);
return tiering_state_ == kTieredDown;
}
void NativeModule::RemoveAllCompiledCode() {
@@ -2430,6 +2435,75 @@
}
}
void NativeModule::RecompileForTiering() {
// If baseline compilation is not finished yet, we do not tier down now. This
// would be tricky because not all code is guaranteed to be available yet.
// Instead, we tier down after streaming compilation finished.
if (!compilation_state_->baseline_compilation_finished()) return;
// Read the tiering state under the lock, then trigger recompilation after
// releasing the lock. If the tiering state was changed when the triggered
// compilation units finish, code installation will handle that correctly.
TieringState current_state;
{
base::RecursiveMutexGuard lock(&allocation_mutex_);
current_state = tiering_state_;
// Initialize {cached_code_} to signal that this cache should get filled
// from now on.
if (!cached_code_) {
cached_code_ = std::make_unique<
std::map<std::pair<ExecutionTier, int>, WasmCode*>>();
// Fill with existing code.
for (auto& code_entry : owned_code_) {
InsertToCodeCache(code_entry.second.get());
}
}
}
RecompileNativeModule(this, current_state);
}
std::vector<int> NativeModule::FindFunctionsToRecompile(
TieringState new_tiering_state) {
WasmCodeRefScope code_ref_scope;
base::RecursiveMutexGuard guard(&allocation_mutex_);
// Get writable permission already here (and not inside the loop in
// {PatchJumpTablesLocked}), to avoid switching for each slot individually.
CodeSpaceWriteScope code_space_write_scope(this);
std::vector<int> function_indexes;
int imported = module()->num_imported_functions;
int declared = module()->num_declared_functions;
const bool tier_down = new_tiering_state == kTieredDown;
for (int slot_index = 0; slot_index < declared; ++slot_index) {
int function_index = imported + slot_index;
WasmCode* old_code = code_table_[slot_index];
bool code_is_good =
tier_down ? old_code && old_code->for_debugging()
: old_code && old_code->tier() == ExecutionTier::kTurbofan;
if (code_is_good) continue;
DCHECK_NOT_NULL(cached_code_);
auto cache_it = cached_code_->find(std::make_pair(
tier_down ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan,
function_index));
if (cache_it != cached_code_->end()) {
WasmCode* cached_code = cache_it->second;
if (old_code) {
WasmCodeRefScope::AddRef(old_code);
// The code is added to the current {WasmCodeRefScope}, hence the ref
// count cannot drop to zero here.
old_code->DecRefOnLiveCode();
}
code_table_[slot_index] = cached_code;
PatchJumpTablesLocked(slot_index, cached_code->instruction_start());
cached_code->IncRef();
continue;
}
// Otherwise add the function to the set of functions to recompile.
function_indexes.push_back(function_index);
}
return function_indexes;
}
void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
base::RecursiveMutexGuard guard(&allocation_mutex_);
// Free the code space.

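FindFunctionsToRecompile above consults cached_code_, a map keyed by (tier, function index), so that toggling the debugger or profiler can reinstall previously seen code instead of recompiling it. A minimal sketch of that lookup, with an int standing in for WasmCode*:

    #include <map>
    #include <utility>
    #include <vector>

    enum class Tier { kLiftoff, kTurbofan };

    // Returns the indexes of functions that have no cached code for the
    // wanted tier; only those need to be recompiled.
    std::vector<int> FunctionsToRecompile(
        const std::map<std::pair<Tier, int>, int>& cached_code,
        Tier wanted_tier, int num_functions) {
      std::vector<int> to_recompile;
      for (int func_index = 0; func_index < num_functions; ++func_index) {
        if (cached_code.count({wanted_tier, func_index}) == 0) {
          to_recompile.push_back(func_index);
        }
      }
      return to_recompile;
    }

    int main() {
      std::map<std::pair<Tier, int>, int> cache{{{Tier::kLiftoff, 0}, 1}};
      // Function 0 has cached Liftoff code; only function 1 needs compiling.
      return FunctionsToRecompile(cache, Tier::kLiftoff, 2).size() == 1 ? 0 : 1;
    }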

@@ -830,20 +830,29 @@ class V8_EXPORT_PRIVATE NativeModule final {
V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
base::Vector<WasmCompilationResult>);
// Set a new debugging state, but don't trigger any recompilation;
// recompilation happens lazily.
void SetDebugState(DebugState);
// Set a new tiering state, but don't trigger any recompilation yet; use
// {RecompileForTiering} for that. The two steps are split because in some
// scenarios we need to drop locks before triggering recompilation.
void SetTieringState(TieringState);
// Check whether this module is in debug state.
DebugState IsInDebugState() const {
base::RecursiveMutexGuard lock(&allocation_mutex_);
return debug_state_;
}
// Check whether this module is tiered down for debugging.
bool IsTieredDown();
// Remove all compiled code from the {NativeModule} and replace it with
// {CompileLazy} builtins.
void RemoveAllCompiledCode();
// Fully recompile this module in the tier set previously via
// {SetTieringState}. The calling thread contributes to compilation and only
// returns once recompilation is done.
void RecompileForTiering();
// Find all functions that need to be recompiled for a new tier. Note that
// compilation jobs might run concurrently, so this method only considers the
// compilation state of this native module at the time of the call.
// Returns a vector of function indexes to recompile.
std::vector<int> FindFunctionsToRecompile(TieringState);
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
// {WasmCode} objects must not be used any more.
@@ -1010,7 +1019,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<NamesProvider> names_provider_;
DebugState debug_state_ = kNotDebugging;
TieringState tiering_state_ = kTieredUp;
// Cache both baseline and top-tier code if we are debugging, to speed up
// repeated enabling/disabling of the debugger or profiler.

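The header comments above explain why SetTieringState and RecompileForTiering are separate steps: the state flip is cheap and happens under a lock, while the expensive recompilation must run with locks dropped. A generic sketch of that split (a hypothetical Module class, not the real NativeModule):

    #include <mutex>

    enum class TieringState { kTieredUp, kTieredDown };

    class Module {
     public:
      // Step 1: flip the state under the lock; cheap and non-blocking.
      void SetTieringState(TieringState state) {
        std::lock_guard<std::mutex> lock(mutex_);
        state_ = state;
      }
      // Step 2: read the state under the lock, then recompile with the lock
      // released, since compilation may take other locks or call back here.
      void RecompileForTiering() {
        TieringState current;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          current = state_;
        }
        Recompile(current);  // runs without mutex_ held
      }

     private:
      void Recompile(TieringState) { /* expensive work elided */ }
      std::mutex mutex_;
      TieringState state_ = TieringState::kTieredUp;
    };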

@@ -390,8 +390,8 @@ struct WasmEngine::IsolateInfo {
const std::shared_ptr<Counters> async_counters;
// Keep new modules in debug state.
bool keep_in_debug_state = false;
// Keep new modules in tiered down state.
bool keep_tiered_down = false;
// Keep track whether we already added a sample for PKU support (we only want
// one sample per Isolate).
@@ -714,14 +714,14 @@ void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
&native_module->module()->functions[function_index], tier);
}
void WasmEngine::EnterDebuggingForIsolate(Isolate* isolate) {
void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
std::vector<std::shared_ptr<NativeModule>> native_modules;
{
base::MutexGuard lock(&mutex_);
if (isolates_[isolate]->keep_in_debug_state) return;
isolates_[isolate]->keep_in_debug_state = true;
if (isolates_[isolate]->keep_tiered_down) return;
isolates_[isolate]->keep_tiered_down = true;
for (auto* native_module : isolates_[isolate]->native_modules) {
native_module->SetDebugState(kDebugging);
native_module->SetTieringState(kTieredDown);
DCHECK_EQ(1, native_modules_.count(native_module));
if (auto shared_ptr = native_modules_[native_module]->weak_ptr.lock()) {
native_modules.emplace_back(std::move(shared_ptr));
@@ -729,7 +729,7 @@ void WasmEngine::EnterDebuggingForIsolate(Isolate* isolate) {
}
}
for (auto& native_module : native_modules) {
native_module->RemoveAllCompiledCode();
native_module->RecompileForTiering();
}
}
@@ -740,12 +740,12 @@ void WasmEngine::LeaveDebuggingForIsolate(Isolate* isolate) {
std::vector<std::pair<std::shared_ptr<NativeModule>, bool>> native_modules;
{
base::MutexGuard lock(&mutex_);
isolates_[isolate]->keep_in_debug_state = false;
isolates_[isolate]->keep_tiered_down = false;
auto can_remove_debug_code = [this](NativeModule* native_module) {
DCHECK_EQ(1, native_modules_.count(native_module));
for (auto* isolate : native_modules_[native_module]->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
if (isolates_[isolate]->keep_in_debug_state) return false;
if (isolates_[isolate]->keep_tiered_down) return false;
}
return true;
};
@@ -753,11 +753,11 @@
DCHECK_EQ(1, native_modules_.count(native_module));
auto shared_ptr = native_modules_[native_module]->weak_ptr.lock();
if (!shared_ptr) continue; // The module is not used any more.
if (!native_module->IsInDebugState()) continue;
if (!native_module->IsTieredDown()) continue;
// Only start tier-up if no other isolate needs this module in tiered
// down state.
bool remove_debug_code = can_remove_debug_code(native_module);
if (remove_debug_code) native_module->SetDebugState(kNotDebugging);
if (remove_debug_code) native_module->SetTieringState(kTieredUp);
native_modules.emplace_back(std::move(shared_ptr), remove_debug_code);
}
}
@@ -1205,8 +1205,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
pair.first->second.get()->isolates.insert(isolate);
auto* isolate_info = isolates_[isolate].get();
isolate_info->native_modules.insert(native_module.get());
if (isolate_info->keep_in_debug_state) {
native_module->SetDebugState(kDebugging);
if (isolate_info->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
}
// Record memory protection key support.
@@ -1232,7 +1232,7 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
wire_bytes.size());
std::shared_ptr<NativeModule> native_module =
native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
bool remove_all_code = false;
bool recompile_module = false;
if (native_module) {
TRACE_EVENT0("v8.wasm", "CacheHit");
base::MutexGuard guard(&mutex_);
@@ -1242,12 +1242,13 @@
}
native_module_info->isolates.insert(isolate);
isolates_[isolate]->native_modules.insert(native_module.get());
if (isolates_[isolate]->keep_in_debug_state) {
native_module->SetDebugState(kDebugging);
remove_all_code = true;
if (isolates_[isolate]->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
recompile_module = true;
}
}
if (remove_all_code) native_module->RemoveAllCompiledCode();
// Potentially recompile the module for tier down, after releasing the mutex.
if (recompile_module) native_module->RecompileForTiering();
return native_module;
}
@@ -1260,19 +1261,21 @@ std::shared_ptr<NativeModule> WasmEngine::UpdateNativeModuleCache(
native_module =
native_module_cache_.Update(std::move(native_module), has_error);
if (prev == native_module.get()) return native_module;
bool remove_all_code = false;
bool recompile_module = false;
{
base::MutexGuard guard(&mutex_);
DCHECK_EQ(1, native_modules_.count(native_module.get()));
native_modules_[native_module.get()]->isolates.insert(isolate);
DCHECK_EQ(1, isolates_.count(isolate));
isolates_[isolate]->native_modules.insert(native_module.get());
if (isolates_[isolate]->keep_in_debug_state) {
remove_all_code = true;
native_module->SetDebugState(kDebugging);
if (isolates_[isolate]->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
recompile_module = true;
}
}
if (remove_all_code) native_module->RemoveAllCompiledCode();
// Potentially recompile the module for tier down, after releasing the mutex.
if (recompile_module) native_module->RecompileForTiering();
return native_module;
}

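TierDownAllModulesPerIsolate and its counterpart above follow the same discipline: under the engine mutex they only upgrade weak registry entries to strong references, and the per-module recompilation runs after the mutex is released. Stripped of V8 specifics, the pattern is:

    #include <memory>
    #include <mutex>
    #include <vector>

    struct Module {
      void RecompileForTiering() { /* expensive, must not hold engine lock */ }
    };

    std::mutex engine_mutex;
    std::vector<std::weak_ptr<Module>> registry;  // engine's module registry

    void TierDownAll() {
      std::vector<std::shared_ptr<Module>> alive;
      {
        std::lock_guard<std::mutex> lock(engine_mutex);
        for (auto& weak : registry) {
          // Skip modules that died; keep the rest alive past the lock.
          if (auto strong = weak.lock()) alive.push_back(std::move(strong));
        }
      }
      for (auto& module : alive) module->RecompileForTiering();
    }

    int main() { TierDownAll(); }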

@@ -204,7 +204,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
void CompileFunction(Isolate* isolate, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier);
void EnterDebuggingForIsolate(Isolate* isolate);
void TierDownAllModulesPerIsolate(Isolate* isolate);
void LeaveDebuggingForIsolate(Isolate* isolate);


@@ -1446,7 +1446,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
result.tagged_parameter_slots,
result.protected_instructions_data.as_vector(),
result.source_positions.as_vector(), GetCodeKind(result),
wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
wasm::ExecutionTier::kNone, wasm::kNoDebugging);
wasm::WasmCode* published_code =
native_module->PublishCode(std::move(wasm_code));
isolate->counters()->wasm_generated_code_size()->Increment(


@@ -907,7 +907,7 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
isolate, enabled_features, std::move(module), code_size_estimate);
// We have to assign a compilation ID here, as it is required for a
// potential re-compilation, e.g. triggered by
// {EnterDebuggingForIsolate}. The value is -2 so that it is different
// {TierDownAllModulesPerIsolate}. The value is -2 so that it is different
// than the compilation ID of actual compilations, and also different than
// the sentinel value of the CompilationState.
shared_native_module->compilation_state()->set_compilation_id(-2);


@@ -37,13 +37,13 @@ inline const char* ExecutionTierToString(ExecutionTier tier) {
// the code also contains breakpoints, and {kForStepping} for code that is
// flooded with breakpoints.
enum ForDebugging : int8_t {
kNotForDebugging = 0,
kNoDebugging = 0,
kForDebugging,
kWithBreakpoints,
kForStepping
};
enum DebugState : bool { kNotDebugging = false, kDebugging = true };
enum TieringState : int8_t { kTieredUp, kTieredDown };
} // namespace wasm
} // namespace internal

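The static_assert in PublishCodeLocked (earlier in this revert) depends on the numeric ordering of this ForDebugging enum: in a tiered-down module, "more debuggable" code may replace "less debuggable" code, but not vice versa. A compilable restatement of that invariant:

    #include <cstdint>
    #include <cstdio>

    enum ForDebugging : int8_t {
      kNoDebugging = 0,
      kForDebugging,
      kWithBreakpoints,
      kForStepping
    };

    static_assert(kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
                  "for_debugging is ordered");

    // Tiered down: install only if the new code is at least as debuggable,
    // e.g. breakpoint code over plain debug code.
    bool ShouldInstall(ForDebugging prior, ForDebugging incoming) {
      return prior <= incoming;
    }

    int main() {
      std::printf("%d %d\n", ShouldInstall(kForDebugging, kWithBreakpoints),
                  ShouldInstall(kWithBreakpoints, kForDebugging));  // 1 0
    }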

@@ -26,7 +26,7 @@ class LiftoffCompileEnvironment {
// Add a table of length 1, for indirect calls.
wasm_runner_.builder().AddIndirectFunctionTable(nullptr, 1);
// Set tiered down such that we generate debugging code.
wasm_runner_.builder().SetDebugState();
wasm_runner_.builder().SetTieredDown();
}
struct TestFunction {


@@ -3954,7 +3954,7 @@ TEST(Liftoff_tier_up) {
CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<WasmCode> new_code = native_module->AddCode(
add.function_index(), desc, 0, 0, {}, {}, WasmCode::kWasmFunction,
ExecutionTier::kTurbofan, kNotForDebugging);
ExecutionTier::kTurbofan, kNoDebugging);
native_module->PublishCode(std::move(new_code));
}


@@ -1601,7 +1601,7 @@ STREAM_TEST(TierDownWithError) {
builder.WriteTo(&buffer);
}
GetWasmEngine()->EnterDebuggingForIsolate(i_isolate);
GetWasmEngine()->TierDownAllModulesPerIsolate(i_isolate);
tester.OnBytesReceived(buffer.begin(), buffer.size());
tester.FinishStream();


@@ -140,7 +140,7 @@ class BreakHandler : public debug::DebugDelegate {
Handle<BreakPoint> SetBreakpoint(WasmRunnerBase* runner, int function_index,
int byte_offset,
int expected_set_byte_offset = -1) {
runner->SwitchToDebug();
runner->TierDown();
int func_offset =
runner->builder().GetFunctionAt(function_index)->code.offset();
int code_offset = func_offset + byte_offset;
@@ -370,7 +370,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
WasmRunner<int, int> runner(execution_tier);
runner.SwitchToDebug();
runner.TierDown();
WasmFunctionCompiler& f2 = runner.NewFunction<void>();
f2.AllocateLocal(kWasmI32);


@@ -342,11 +342,10 @@ TEST(TierDownAfterDeserialization) {
CHECK_NOT_NULL(turbofan_code);
CHECK_EQ(ExecutionTier::kTurbofan, turbofan_code->tier());
GetWasmEngine()->EnterDebuggingForIsolate(isolate);
GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
// Entering debugging should delete all code, so that debug code gets compiled
// lazily.
CHECK_NULL(native_module->GetCode(0));
auto* liftoff_code = native_module->GetCode(0);
CHECK_EQ(ExecutionTier::kLiftoff, liftoff_code->tier());
}
TEST(SerializeLiftoffModuleFails) {


@@ -611,7 +611,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
NativeModule* native_module =
builder_->instance_object()->module_object().native_module();
ForDebugging for_debugging =
native_module->IsInDebugState() ? kForDebugging : kNotForDebugging;
native_module->IsTieredDown() ? kForDebugging : kNoDebugging;
base::Optional<WasmCompilationResult> result;
if (builder_->test_execution_tier() ==


@@ -244,14 +244,14 @@ class TestingModuleBuilder {
return reinterpret_cast<Address>(globals_data_);
}
void SetDebugState() {
native_module_->SetDebugState(kDebugging);
void SetTieredDown() {
native_module_->SetTieringState(kTieredDown);
execution_tier_ = TestExecutionTier::kLiftoff;
}
void SwitchToDebug() {
SetDebugState();
native_module_->RemoveAllCompiledCode();
void TierDown() {
SetTieredDown();
native_module_->RecompileForTiering();
}
CompilationEnv CreateCompilationEnv();
@@ -471,7 +471,7 @@ class WasmRunnerBase : public InitializedHandleScope {
bool interpret() { return builder_.interpret(); }
void SwitchToDebug() { builder_.SwitchToDebug(); }
void TierDown() { builder_.TierDown(); }
template <typename ReturnType, typename... ParamTypes>
FunctionSig* CreateSig() {


@@ -17,21 +17,19 @@ function create_builder(delta = 0) {
return builder;
}
function checkDebugCode(instance) {
function checkTieredDown(instance) {
for (let i = 0; i < num_functions; ++i) {
// Call the function once because of lazy compilation.
instance.exports['f' + i]();
assertTrue(%IsWasmDebugFunction(instance.exports['f' + i]));
assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
}
}
function waitForNoDebugCode(instance) {
// Busy waiting until all functions have left debug mode.
function waitForTieredUp(instance) {
// Busy waiting until all functions are tiered up.
let num_liftoff_functions = 0;
while (true) {
num_liftoff_functions = 0;
for (let i = 0; i < num_functions; ++i) {
if (%IsWasmDebugFunction(instance.exports['f' + i])) {
if (%IsLiftoffFunction(instance.exports['f' + i])) {
num_liftoff_functions++;
}
}
@@ -41,37 +39,37 @@
const Debug = new DebugWrapper();
(function testEnterDebugMode() {
(function testTierDownToLiftoff() {
// In the 'isolates' test, this test runs in parallel to itself on two
// isolates. All checks below should still hold.
const instance = create_builder(0).instantiate();
Debug.enable();
checkDebugCode(instance);
checkTieredDown(instance);
const instance2 = create_builder(1).instantiate();
checkDebugCode(instance2);
checkTieredDown(instance2);
Debug.disable();
// Eventually the instances will have completely left debug mode again.
waitForNoDebugCode(instance);
waitForNoDebugCode(instance2);
// Eventually the instances will be completely tiered up again.
waitForTieredUp(instance);
waitForTieredUp(instance2);
})();
// Test async compilation.
assertPromiseResult((async function testEnterDebugModeAsync() {
assertPromiseResult((async function testTierDownToLiftoffAsync() {
// First test: enable the debugger *after* compiling the module.
const instance = await create_builder(2).asyncInstantiate();
Debug.enable();
checkDebugCode(instance);
checkTieredDown(instance);
const instance2 = await create_builder(3).asyncInstantiate();
checkDebugCode(instance2);
checkTieredDown(instance2);
Debug.disable();
waitForNoDebugCode(instance);
waitForNoDebugCode(instance2);
waitForTieredUp(instance);
waitForTieredUp(instance2);
// Second test: enable the debugger *while* compiling the module.
const instancePromise = create_builder(4).asyncInstantiate();
Debug.enable();
const instance3 = await instancePromise;
checkDebugCode(instance3);
checkTieredDown(instance3);
Debug.disable();
waitForNoDebugCode(instance3);
waitForTieredUp(instance3);
})());


@@ -1333,7 +1333,7 @@
'wasm/liftoff': [SKIP],
'wasm/liftoff-debug': [SKIP],
'wasm/tier-up-testing-flag': [SKIP],
'wasm/enter-debug-state': [SKIP],
'wasm/tier-down-to-liftoff': [SKIP],
'wasm/wasm-dynamic-tiering': [SKIP],
'wasm/test-partial-serialization': [SKIP],
'regress/wasm/regress-1248024': [SKIP],


@@ -18,22 +18,20 @@ function create_builder(delta = 0) {
return builder;
}
function checkForDebugCode(instance) {
function checkTieredDown(instance) {
for (let i = 0; i < num_functions; ++i) {
// Call the function once because of lazy compilation.
instance.exports['f' + i]();
assertTrue(%IsWasmDebugFunction(instance.exports['f' + i]));
assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
}
}
function check(instance) {
%WasmEnterDebugging();
checkForDebugCode(instance);
%WasmTierDown();
checkTieredDown(instance);
for (let i = 0; i < num_functions; ++i) {
%WasmTierUpFunction(instance, i);
}
checkForDebugCode(instance);
checkTieredDown(instance);
}
(function testTierDownToLiftoff() {