Reland: "[wasm] Compile debug code lazily"
Three issues were fixed:

* In debug state, only publish debug code.
* When entering debugging in an isolate, only delete the code of those
  NativeModules that aren't in debug state already.
* When async compilation finishes, only throw away code if the debug
  state changed during compilation.

Original message:

Currently V8 recompiles all functions of a WebAssembly module when a
debugging session starts. This is outdated behavior and causes OOMs for
developers. With this CL, all compiled code just gets removed when a
debugging session starts, and debug code gets compiled lazily.

This behavior may lead to small delays whenever the debugger enters a new
function. However, developers are used to debug code being slightly
slower, and the delays should be on the order of a few milliseconds. On
the other hand, debug modules can be big, sometimes containing more than
1'000'000 functions, and developers have reported OOMs when debugging.

R=clemensb@chromium.org

Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_isolates_rel
Bug: v8:13541, chromium:1372621, v8:13224
Change-Id: Ie27388a287cd16a67a483e14fc22c2ab4180962e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4079190
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Kim-Anh Tran <kimanh@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84873}
parent e0399e4394
commit 37e5a28add
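Before the diff, a minimal standalone sketch of the publish decision behind the first fix ("in debug state, only publish debug code"). The two enums mirror the diff below; the ShouldUpdateCodeTable free function, the Code struct, and the integer tier stand-in are illustrative assumptions, not the actual V8 API:

#include <cassert>
#include <cstdint>

// Mirrors wasm-tier.h after this CL.
enum ForDebugging : int8_t {
  kNotForDebugging = 0,
  kForDebugging,
  kWithBreakpoints,
  kForStepping
};
enum DebugState : bool { kNotDebugging = false, kDebugging = true };

struct Code {
  ForDebugging for_debugging;
  int tier;  // stand-in for ExecutionTier; higher means more optimized
};

// Condensed from NativeModule::should_update_code_table in the diff below.
bool ShouldUpdateCodeTable(DebugState debug_state, const Code* prior_code,
                           const Code& new_code) {
  // Never install stepping (flooded-with-breakpoints) code.
  if (new_code.for_debugging == kForStepping) return false;
  if (debug_state == kDebugging) {
    // In debug state, only publish debug code.
    if (new_code.for_debugging == kNotForDebugging) return false;
    // Install breakpoint code over plain debug code, never the reverse.
    if (prior_code && prior_code->for_debugging > new_code.for_debugging) {
      return false;
    }
  }
  // Outside debugging, keep prior non-debug code of a higher tier.
  if (prior_code && prior_code->for_debugging == kNotForDebugging &&
      prior_code->tier > new_code.tier) {
    return false;
  }
  return true;
}

int main() {
  Code liftoff_debug{kForDebugging, 0};
  Code turbofan{kNotForDebugging, 1};
  // While debugging, late-arriving TurboFan code must not be published;
  // this is the race the reland fixes.
  assert(!ShouldUpdateCodeTable(kDebugging, &liftoff_debug, turbofan));
  // Once debugging ends, debug code is replaced by optimized code.
  assert(ShouldUpdateCodeTable(kNotDebugging, &liftoff_debug, turbofan));
  return 0;
}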
@@ -8285,7 +8285,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
         result.tagged_parameter_slots,
         result.protected_instructions_data.as_vector(),
         result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
-        wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+        wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
     published_code = native_module->PublishCode(std::move(wasm_code));
   }
   return published_code;
@@ -8338,7 +8338,7 @@ wasm::WasmCode* CompileWasmJSFastCallWrapper(wasm::NativeModule* native_module,
         result.tagged_parameter_slots,
         result.protected_instructions_data.as_vector(),
         result.source_positions.as_vector(), wasm::WasmCode::kWasmToJsWrapper,
-        wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+        wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
     return native_module->PublishCode(std::move(wasm_code));
   }
 }
@@ -958,9 +958,9 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
 }
 
 #if V8_ENABLE_WEBASSEMBLY
-void TierDownAllModulesPerIsolate(Isolate* v8_isolate) {
+void EnterDebuggingForIsolate(Isolate* v8_isolate) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  i::wasm::GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
+  i::wasm::GetWasmEngine()->EnterDebuggingForIsolate(isolate);
 }
 
 void LeaveDebuggingForIsolate(Isolate* v8_isolate) {
@@ -316,7 +316,7 @@ V8_EXPORT_PRIVATE void SetDebugDelegate(Isolate* isolate,
                                         DebugDelegate* listener);
 
 #if V8_ENABLE_WEBASSEMBLY
-V8_EXPORT_PRIVATE void TierDownAllModulesPerIsolate(Isolate* isolate);
+V8_EXPORT_PRIVATE void EnterDebuggingForIsolate(Isolate* isolate);
 V8_EXPORT_PRIVATE void LeaveDebuggingForIsolate(Isolate* isolate);
 #endif  // V8_ENABLE_WEBASSEMBLY
 
@@ -395,7 +395,7 @@ GdbServer::DebugDelegate::DebugDelegate(Isolate* isolate, GdbServer* gdb_server)
 
   // Register the delegate
   isolate_->debug()->SetDebugDelegate(this);
-  v8::debug::TierDownAllModulesPerIsolate((v8::Isolate*)isolate_);
+  v8::debug::EnterDebuggingForIsolate((v8::Isolate*)isolate_);
   v8::debug::ChangeBreakOnException((v8::Isolate*)isolate_,
                                     v8::debug::BreakOnUncaughtException);
 }
@@ -102,7 +102,7 @@ void V8Debugger::enable() {
   v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
   m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
 #if V8_ENABLE_WEBASSEMBLY
-  v8::debug::TierDownAllModulesPerIsolate(m_isolate);
+  v8::debug::EnterDebuggingForIsolate(m_isolate);
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
 
@@ -432,13 +432,29 @@ RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
-RUNTIME_FUNCTION(Runtime_WasmTierDown) {
+RUNTIME_FUNCTION(Runtime_WasmEnterDebugging) {
   HandleScope scope(isolate);
   DCHECK_EQ(0, args.length());
-  wasm::GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
+  wasm::GetWasmEngine()->EnterDebuggingForIsolate(isolate);
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_IsWasmDebugFunction) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<JSFunction> function = args.at<JSFunction>(0);
+  CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
+  Handle<WasmExportedFunction> exp_fun =
+      Handle<WasmExportedFunction>::cast(function);
+  wasm::NativeModule* native_module =
+      exp_fun->instance().module_object().native_module();
+  uint32_t func_index = exp_fun->function_index();
+  wasm::WasmCodeRefScope code_ref_scope;
+  wasm::WasmCode* code = native_module->GetCode(func_index);
+  return isolate->heap()->ToBoolean(code && code->is_liftoff() &&
+                                    code->for_debugging());
+}
+
 RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -657,6 +657,7 @@ namespace internal {
   F(IsAsmWasmCode, 1, 1)                  \
   F(IsLiftoffFunction, 1, 1)              \
   F(IsTurboFanFunction, 1, 1)             \
+  F(IsWasmDebugFunction, 1, 1)            \
   F(IsThreadInWasm, 0, 1)                 \
   F(IsWasmCode, 1, 1)                     \
   F(IsWasmTrapHandlerEnabled, 0, 1)       \
@@ -665,7 +666,7 @@ namespace internal {
   F(SetWasmInstantiateControls, 0, 1)     \
   F(WasmGetNumberOfInstances, 1, 1)       \
   F(WasmNumCodeSpaces, 1, 1)              \
-  F(WasmTierDown, 0, 1)                   \
+  F(WasmEnterDebugging, 0, 1)             \
   F(WasmTierUpFunction, 2, 1)             \
   F(WasmTraceEnter, 0, 1)                 \
   F(WasmTraceExit, 1, 1)                  \
@@ -775,7 +775,7 @@ class LiftoffCompiler {
     // overflows in the budget calculation.
     DCHECK_LE(1, budget_used);
 
-    if (for_debugging_ != kNoDebugging) return;
+    if (for_debugging_ != kNotForDebugging) return;
     CODE_COMMENT("tierup check");
     // We never want to blow the entire budget at once.
     const int kMax = v8_flags.wasm_tiering_budget / 4;
@@ -857,7 +857,7 @@ class LiftoffCompiler {
   }
 
   bool dynamic_tiering() {
-    return env_->dynamic_tiering && for_debugging_ == kNoDebugging &&
+    return env_->dynamic_tiering && for_debugging_ == kNotForDebugging &&
            (v8_flags.wasm_tier_up_filter == -1 ||
             v8_flags.wasm_tier_up_filter == func_index_);
   }
@@ -56,7 +56,7 @@ enum LiftoffBailoutReason : int8_t {
 
 struct LiftoffOptions {
   int func_index = -1;
-  ForDebugging for_debugging = kNoDebugging;
+  ForDebugging for_debugging = kNotForDebugging;
   Counters* counters = nullptr;
   AssemblerBufferCache* assembler_buffer_cache = nullptr;
   WasmFeatures* detected_features = nullptr;
@@ -124,7 +124,6 @@ enum class CompilationEvent : uint8_t {
   kFinishedExportWrappers,
   kFinishedCompilationChunk,
   kFailedCompilation,
-  kFinishedRecompilation
 };
 
 class V8_EXPORT_PRIVATE CompilationEventCallback {
@@ -175,7 +174,6 @@ class V8_EXPORT_PRIVATE CompilationState {
 
   bool failed() const;
   bool baseline_compilation_finished() const;
-  bool recompilation_finished() const;
 
   void set_compilation_id(int compilation_id);
 
@@ -162,7 +162,7 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
 
   DCHECK_LE(native_module->num_imported_functions(), function->func_index);
   DCHECK_LT(function->func_index, native_module->num_functions());
-  WasmCompilationUnit unit(function->func_index, tier, kNoDebugging);
+  WasmCompilationUnit unit(function->func_index, tier, kNotForDebugging);
   CompilationEnv env = native_module->CreateCompilationEnv();
   WasmCompilationResult result = unit.ExecuteCompilation(
       &env, native_module->compilation_state()->GetWireBytesStorage().get(),
@@ -56,13 +56,16 @@ struct WasmCompilationResult {
   ExecutionTier requested_tier;
   ExecutionTier result_tier;
   Kind kind = kFunction;
-  ForDebugging for_debugging = kNoDebugging;
+  ForDebugging for_debugging = kNotForDebugging;
 };
 
 class V8_EXPORT_PRIVATE WasmCompilationUnit final {
  public:
   WasmCompilationUnit(int index, ExecutionTier tier, ForDebugging for_debugging)
-      : func_index_(index), tier_(tier), for_debugging_(for_debugging) {}
+      : func_index_(index), tier_(tier), for_debugging_(for_debugging) {
+    DCHECK_IMPLIES(for_debugging != ForDebugging::kNotForDebugging,
+                   tier_ == ExecutionTier::kLiftoff);
+  }
 
   WasmCompilationResult ExecuteCompilation(CompilationEnv*,
                                            const WireBytesStorage*, Counters*,
@@ -575,8 +575,6 @@ class CompilationStateImpl {
       int num_export_wrappers,
       ProfileInformation* pgo_info);
 
-  // Initialize the compilation progress after deserialization. This is needed
-  // for recompilation (e.g. for tier down) to work later.
   void InitializeCompilationProgressAfterDeserialization(
       base::Vector<const int> lazy_functions,
       base::Vector<const int> eager_functions);
@@ -591,14 +589,6 @@ class CompilationStateImpl {
   // equivalent to {InitializeCompilationUnits}.
   void AddCompilationUnit(CompilationUnitBuilder* builder, int func_index);
 
-  // Initialize recompilation of the whole module: Setup compilation progress
-  // for recompilation and add the respective compilation units. The callback is
-  // called immediately if no recompilation is needed, or called later
-  // otherwise.
-  void InitializeRecompilation(TieringState new_tiering_state,
-                               std::unique_ptr<CompilationEventCallback>
-                                   recompilation_finished_callback);
-
   // Add the callback to be called on compilation events. Needs to be
   // set before {CommitCompilationUnits} is run to ensure that it receives all
   // events. The callback object must support being deleted from any thread.
@@ -651,11 +641,6 @@ class CompilationStateImpl {
            outstanding_export_wrappers_ == 0;
   }
 
-  bool recompilation_finished() const {
-    base::MutexGuard guard(&callbacks_mutex_);
-    return outstanding_recompilation_functions_ == 0;
-  }
-
   DynamicTiering dynamic_tiering() const { return dynamic_tiering_; }
 
   Counters* counters() const { return async_counters_.get(); }
@@ -770,9 +755,6 @@ class CompilationStateImpl {
   size_t bytes_since_last_chunk_ = 0;
   std::vector<uint8_t> compilation_progress_;
 
-  int outstanding_recompilation_functions_ = 0;
-  TieringState tiering_state_ = kTieredUp;
-
   // End of fields protected by {callbacks_mutex_}.
   //////////////////////////////////////////////////////////////////////////////
 
@@ -785,7 +767,6 @@ class CompilationStateImpl {
   using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
   using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
   using ReachedTierField = base::BitField8<ExecutionTier, 4, 2>;
-  using MissingRecompilationField = base::BitField8<bool, 6, 1>;
 };
 
 CompilationStateImpl* Impl(CompilationState* compilation_state) {
@@ -868,10 +849,6 @@ bool CompilationState::baseline_compilation_finished() const {
   return Impl(this)->baseline_compilation_finished();
 }
 
-bool CompilationState::recompilation_finished() const {
-  return Impl(this)->recompilation_finished();
-}
-
 void CompilationState::set_compilation_id(int compilation_id) {
   Impl(this)->set_compilation_id(compilation_id);
 }
@@ -945,15 +922,19 @@ struct ExecutionTierPair {
   ExecutionTier top_tier;
 };
 
+// Pass the debug state as a separate parameter to avoid data races: the debug
+// state may change between its use here and its use at the call site. To have
+// a consistent view on the debug state, the caller reads the debug state once
+// and then passes it to this function.
 ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
                                            DynamicTiering dynamic_tiering,
+                                           DebugState is_in_debug_state,
                                            bool lazy_module) {
   const WasmModule* module = native_module->module();
   if (is_asmjs_module(module)) {
     return {ExecutionTier::kTurbofan, ExecutionTier::kTurbofan};
   }
-  // TODO(13224): Use lazy compilation for debug code.
-  if (native_module->IsTieredDown()) {
+  if (is_in_debug_state) {
     return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff};
   }
   if (lazy_module) {
@@ -968,14 +949,17 @@ ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
 }
 
 ExecutionTierPair GetLazyCompilationTiers(NativeModule* native_module,
-                                          uint32_t func_index) {
+                                          uint32_t func_index,
+                                          DebugState is_in_debug_state) {
   DynamicTiering dynamic_tiering =
       Impl(native_module->compilation_state())->dynamic_tiering();
   // For lazy compilation, get the tiers we would use if lazy compilation is
   // disabled.
   constexpr bool kNotLazy = false;
-  ExecutionTierPair tiers =
-      GetDefaultTiersPerModule(native_module, dynamic_tiering, kNotLazy);
+  ExecutionTierPair tiers = GetDefaultTiersPerModule(
+      native_module, dynamic_tiering, is_in_debug_state, kNotLazy);
+  // If we are in debug mode, we ignore compilation hints.
+  if (is_in_debug_state) return tiers;
 
   // Check if compilation hints override default tiering behaviour.
   if (native_module->enabled_features().has_compilation_hints()) {
@@ -1012,7 +996,7 @@ class CompilationUnitBuilder {
   void AddImportUnit(uint32_t func_index) {
     DCHECK_GT(native_module_->module()->num_imported_functions, func_index);
     baseline_units_.emplace_back(func_index, ExecutionTier::kNone,
-                                 kNoDebugging);
+                                 kNotForDebugging);
   }
 
   void AddJSToWasmWrapperUnit(
@@ -1021,11 +1005,11 @@ class CompilationUnitBuilder {
   }
 
   void AddBaselineUnit(int func_index, ExecutionTier tier) {
-    baseline_units_.emplace_back(func_index, tier, kNoDebugging);
+    baseline_units_.emplace_back(func_index, tier, kNotForDebugging);
   }
 
   void AddTopTierUnit(int func_index, ExecutionTier tier) {
-    tiering_units_.emplace_back(func_index, tier, kNoDebugging);
+    tiering_units_.emplace_back(func_index, tier, kNotForDebugging);
   }
 
   void AddDebugUnit(int func_index) {
@@ -1033,13 +1017,6 @@ class CompilationUnitBuilder {
                                  kForDebugging);
   }
 
-  void AddRecompilationUnit(int func_index, ExecutionTier tier) {
-    // For recompilation, just treat all units like baseline units.
-    baseline_units_.emplace_back(
-        func_index, tier,
-        tier == ExecutionTier::kLiftoff ? kForDebugging : kNoDebugging);
-  }
-
   bool Commit() {
     if (baseline_units_.empty() && tiering_units_.empty() &&
         js_to_wasm_wrapper_units_.empty()) {
@@ -1134,12 +1111,15 @@ bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
 
   CompilationStateImpl* compilation_state =
       Impl(native_module->compilation_state());
-  ExecutionTierPair tiers = GetLazyCompilationTiers(native_module, func_index);
+  DebugState is_in_debug_state = native_module->IsInDebugState();
+  ExecutionTierPair tiers =
+      GetLazyCompilationTiers(native_module, func_index, is_in_debug_state);
 
   DCHECK_LE(native_module->num_imported_functions(), func_index);
   DCHECK_LT(func_index, native_module->num_functions());
-  WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier,
-                                    kNoDebugging};
+  WasmCompilationUnit baseline_unit{
+      func_index, tiers.baseline_tier,
+      is_in_debug_state ? kForDebugging : kNotForDebugging};
   CompilationEnv env = native_module->CreateCompilationEnv();
   // TODO(wasm): Use an assembler buffer cache for lazy compilation.
   AssemblerBufferCache* assembler_buffer_cache = nullptr;
@@ -1181,7 +1161,8 @@ bool CompileLazy(Isolate* isolate, WasmInstanceObject instance,
   if (GetCompileStrategy(module, native_module->enabled_features(), func_index,
                          lazy_module) == CompileStrategy::kLazy &&
       tiers.baseline_tier < tiers.top_tier) {
-    WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging};
+    WasmCompilationUnit tiering_unit{func_index, tiers.top_tier,
+                                     kNotForDebugging};
     compilation_state->CommitTopTierCompilationUnit(tiering_unit);
   }
   return true;
@@ -1391,7 +1372,7 @@ void TriggerTierUp(WasmInstanceObject instance, int func_index) {
   CompilationStateImpl* compilation_state =
       Impl(native_module->compilation_state());
   WasmCompilationUnit tiering_unit{func_index, ExecutionTier::kTurbofan,
-                                   kNoDebugging};
+                                   kNotForDebugging};
 
   const WasmModule* module = native_module->module();
   int priority;
@@ -1989,44 +1970,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
   return native_module;
 }
 
-void RecompileNativeModule(NativeModule* native_module,
-                           TieringState tiering_state) {
-  // Install a callback to notify us once background recompilation finished.
-  auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
-  auto* compilation_state = Impl(native_module->compilation_state());
-
-  class RecompilationFinishedCallback : public CompilationEventCallback {
-   public:
-    explicit RecompilationFinishedCallback(
-        std::shared_ptr<base::Semaphore> recompilation_finished_semaphore)
-        : recompilation_finished_semaphore_(
-              std::move(recompilation_finished_semaphore)) {}
-
-    void call(CompilationEvent event) override {
-      DCHECK_NE(CompilationEvent::kFailedCompilation, event);
-      if (event == CompilationEvent::kFinishedRecompilation) {
-        recompilation_finished_semaphore_->Signal();
-      }
-    }
-
-   private:
-    std::shared_ptr<base::Semaphore> recompilation_finished_semaphore_;
-  };
-
-  // The callback captures a shared ptr to the semaphore.
-  // Initialize the compilation units and kick off background compile tasks.
-  compilation_state->InitializeRecompilation(
-      tiering_state, std::make_unique<RecompilationFinishedCallback>(
-                         recompilation_finished_semaphore));
-
-  constexpr JobDelegate* kNoDelegate = nullptr;
-  ExecuteCompilationUnits(compilation_state->native_module_weak(),
-                          compilation_state->counters(), kNoDelegate,
-                          kBaselineOnly);
-  recompilation_finished_semaphore->Wait();
-  DCHECK(!compilation_state->failed());
-}
-
 AsyncCompileJob::AsyncCompileJob(
     Isolate* isolate, WasmFeatures enabled_features,
     base::OwnedVector<const uint8_t> bytes, Handle<Context> context,
@@ -2332,11 +2275,13 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
   // We can only update the feature counts once the entire compile is done.
   compilation_state->PublishDetectedFeatures(isolate_);
 
-  // We might need to recompile the module for debugging, if the debugger was
-  // enabled while streaming compilation was running. Since handling this while
-  // compiling via streaming is tricky, we just tier down now, before publishing
-  // the module.
-  if (native_module_->IsTieredDown()) native_module_->RecompileForTiering();
+  // We might need debug code for the module, if the debugger was enabled while
+  // streaming compilation was running. Since handling this while compiling via
+  // streaming is tricky, we just remove all code which may have been generated,
+  // and compile debug code lazily.
+  if (native_module_->IsInDebugState()) {
+    native_module_->RemoveAllCompiledCode();
+  }
 
   // Finally, log all generated code (it does not matter if this happens
   // repeatedly in case the script is shared).
@@ -2404,10 +2349,6 @@ class AsyncCompileJob::CompilationStateCallback
           job_->DoSync<Fail>();
         }
         break;
-      case CompilationEvent::kFinishedRecompilation:
-        // This event can happen out of order, hence don't remember this in
-        // {last_event_}.
-        return;
     }
 #ifdef DEBUG
     last_event_ = event;
@@ -3167,7 +3108,8 @@ void CompilationStateImpl::InitializeCompilationProgress(
 
   // Compute the default compilation progress for all functions, and set it.
   const ExecutionTierPair default_tiers = GetDefaultTiersPerModule(
-      native_module_, dynamic_tiering_, IsLazyModule(module));
+      native_module_, dynamic_tiering_, native_module_->IsInDebugState(),
+      IsLazyModule(module));
   const uint8_t default_progress =
       RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
       RequiredTopTierField::encode(default_tiers.top_tier) |
@@ -3247,7 +3189,7 @@ uint8_t CompilationStateImpl::AddCompilationUnitInternal(
 void CompilationStateImpl::InitializeCompilationUnits(
     std::unique_ptr<CompilationUnitBuilder> builder) {
   int offset = native_module_->module()->num_imported_functions;
-  if (native_module_->IsTieredDown()) {
+  if (native_module_->IsInDebugState()) {
     for (size_t i = 0; i < compilation_progress_.size(); ++i) {
       int func_index = offset + static_cast<int>(i);
       builder->AddDebugUnit(func_index);
@@ -3267,10 +3209,6 @@ void CompilationStateImpl::InitializeCompilationUnits(
 
 void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
                                               int func_index) {
-  if (native_module_->IsTieredDown()) {
-    builder->AddDebugUnit(func_index);
-    return;
-  }
   int offset = native_module_->module()->num_imported_functions;
   int progress_index = func_index - offset;
   uint8_t function_progress;
@@ -3339,7 +3277,8 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
   // Update compilation state for eagerly compiled functions.
   constexpr bool kNotLazy = false;
   ExecutionTierPair default_tiers =
-      GetDefaultTiersPerModule(native_module_, dynamic_tiering_, kNotLazy);
+      GetDefaultTiersPerModule(native_module_, dynamic_tiering_,
+                               native_module_->IsInDebugState(), kNotLazy);
   uint8_t progress_for_eager_functions =
       RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
       RequiredTopTierField::encode(default_tiers.top_tier) |
@@ -3368,87 +3307,6 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
   WaitForCompilationEvent(CompilationEvent::kFinishedBaselineCompilation);
 }
 
-void CompilationStateImpl::InitializeRecompilation(
-    TieringState new_tiering_state,
-    std::unique_ptr<CompilationEventCallback> recompilation_finished_callback) {
-  DCHECK(!failed());
-
-  // Hold the mutex as long as possible, to synchronize between multiple
-  // recompilations that are triggered at the same time (e.g. when the profiler
-  // is disabled).
-  base::Optional<base::MutexGuard> guard(&callbacks_mutex_);
-
-  // As long as there are outstanding recompilation functions, take part in
-  // compilation. This is to avoid recompiling for the same tier or for
-  // different tiers concurrently. Note that the compilation unit queues can run
-  // empty before {outstanding_recompilation_functions_} drops to zero. In this
-  // case, we do not wait for the last running compilation threads to finish
-  // their units, but just start our own recompilation already.
-  while (outstanding_recompilation_functions_ > 0 &&
-         compilation_unit_queues_.GetTotalSize() > 0) {
-    guard.reset();
-    constexpr JobDelegate* kNoDelegate = nullptr;
-    ExecuteCompilationUnits(native_module_weak_, async_counters_.get(),
-                            kNoDelegate, kBaselineOrTopTier);
-    guard.emplace(&callbacks_mutex_);
-  }
-
-  // Information about compilation progress is shared between this class and the
-  // NativeModule. Before updating information here, consult the NativeModule to
-  // find all functions that need recompilation.
-  // Since the current tiering state is updated on the NativeModule before
-  // triggering recompilation, it's OK if the information is slightly outdated.
-  // If we compile functions twice, the NativeModule will ignore all redundant
-  // code (or code compiled for the wrong tier).
-  std::vector<int> recompile_function_indexes =
-      native_module_->FindFunctionsToRecompile(new_tiering_state);
-
-  callbacks_.emplace_back(std::move(recompilation_finished_callback));
-  tiering_state_ = new_tiering_state;
-
-  // If compilation progress is not initialized yet, then compilation didn't
-  // start yet, and new code will be kept tiered-down from the start. For
-  // streaming compilation, there is a special path to tier down later, when
-  // the module is complete. In any case, we don't need to recompile here.
-  base::Optional<CompilationUnitBuilder> builder;
-  if (compilation_progress_.size() > 0) {
-    builder.emplace(native_module_);
-    const WasmModule* module = native_module_->module();
-    DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
-    DCHECK_GE(module->num_declared_functions,
-              recompile_function_indexes.size());
-    outstanding_recompilation_functions_ =
-        static_cast<int>(recompile_function_indexes.size());
-    // Restart recompilation if another recompilation is already happening.
-    for (auto& progress : compilation_progress_) {
-      progress = MissingRecompilationField::update(progress, false);
-    }
-    auto new_tier = new_tiering_state == kTieredDown ? ExecutionTier::kLiftoff
-                                                     : ExecutionTier::kTurbofan;
-    int imported = module->num_imported_functions;
-    // Generate necessary compilation units on the fly.
-    for (int function_index : recompile_function_indexes) {
-      DCHECK_LE(imported, function_index);
-      int slot_index = function_index - imported;
-      auto& progress = compilation_progress_[slot_index];
-      progress = MissingRecompilationField::update(progress, true);
-      builder->AddRecompilationUnit(function_index, new_tier);
-    }
-  }
-
-  // Trigger callback if module needs no recompilation.
-  if (outstanding_recompilation_functions_ == 0) {
-    TriggerCallbacks(base::EnumSet<CompilationEvent>(
-        {CompilationEvent::kFinishedRecompilation}));
-  }
-
-  if (builder.has_value()) {
-    // Avoid holding lock while scheduling a compile job.
-    guard.reset();
-    builder->Commit();
-  }
-}
-
 void CompilationStateImpl::AddCallback(
     std::unique_ptr<CompilationEventCallback> callback) {
   base::MutexGuard callbacks_guard(&callbacks_mutex_);
@@ -3610,25 +3468,6 @@ void CompilationStateImpl::OnFinishedUnits(
       bytes_since_last_chunk_ += code->instructions().size();
     }
 
-    if (V8_UNLIKELY(MissingRecompilationField::decode(function_progress))) {
-      DCHECK_LT(0, outstanding_recompilation_functions_);
-      // If tiering up, accept any TurboFan code. For tiering down, look at
-      // the {for_debugging} flag. The tier can be Liftoff or TurboFan and is
-      // irrelevant here. In particular, we want to ignore any outstanding
-      // non-debugging units.
-      bool matches = tiering_state_ == kTieredDown
-                         ? code->for_debugging()
-                         : code->tier() == ExecutionTier::kTurbofan;
-      if (matches) {
-        outstanding_recompilation_functions_--;
-        compilation_progress_[slot_index] = MissingRecompilationField::update(
-            compilation_progress_[slot_index], false);
-        if (outstanding_recompilation_functions_ == 0) {
-          triggered_events.Add(CompilationEvent::kFinishedRecompilation);
-        }
-      }
-    }
-
     // Update function's compilation progress.
     if (code->tier() > reached_tier) {
       compilation_progress_[slot_index] = ReachedTierField::update(
@@ -3678,11 +3517,9 @@ void CompilationStateImpl::TriggerCallbacks(
 
   // Don't trigger past events again.
   triggered_events -= finished_events_;
-  // Recompilation can happen multiple times, thus do not store this. There can
-  // also be multiple compilation chunks.
-  finished_events_ |= triggered_events -
-                      CompilationEvent::kFinishedRecompilation -
-                      CompilationEvent::kFinishedCompilationChunk;
+  // There can be multiple compilation chunks, thus do not store this.
+  finished_events_ |=
+      triggered_events - CompilationEvent::kFinishedCompilationChunk;
 
   for (auto event :
        {std::make_pair(CompilationEvent::kFailedCompilation,
@@ -3692,9 +3529,7 @@ void CompilationStateImpl::TriggerCallbacks(
         std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
                        "wasm.BaselineFinished"),
         std::make_pair(CompilationEvent::kFinishedCompilationChunk,
-                       "wasm.CompilationChunkFinished"),
-        std::make_pair(CompilationEvent::kFinishedRecompilation,
-                       "wasm.RecompilationFinished")}) {
+                       "wasm.CompilationChunkFinished")}) {
     if (!triggered_events.contains(event.first)) continue;
     DCHECK_NE(compilation_id_, kInvalidCompilationID);
     TRACE_EVENT1("v8.wasm", event.second, "id", compilation_id_);
@@ -3703,8 +3538,7 @@ void CompilationStateImpl::TriggerCallbacks(
     }
   }
 
-  if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
-      outstanding_recompilation_functions_ == 0) {
+  if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0) {
     auto new_end = std::remove_if(
         callbacks_.begin(), callbacks_.end(), [](const auto& callback) {
           return callback->release_after_final_event();
@@ -4017,7 +3851,7 @@ WasmCode* CompileImportWrapper(
         result.tagged_parameter_slots,
         result.protected_instructions_data.as_vector(),
         result.source_positions.as_vector(), GetCodeKind(result),
-        ExecutionTier::kNone, kNoDebugging);
+        ExecutionTier::kNone, kNotForDebugging);
     published_code = native_module->PublishCode(std::move(wasm_code));
   }
   (*cache_scope)[key] = published_code;
@@ -61,9 +61,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
     int compilation_id, v8::metrics::Recorder::ContextId context_id,
     ProfileInformation* pgo_info);
 
-void RecompileNativeModule(NativeModule* native_module,
-                           TieringState new_tiering_state);
-
 V8_EXPORT_PRIVATE
 void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module);
 
@@ -1144,7 +1144,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
       source_pos.as_vector(),    // source positions
       WasmCode::kWasmFunction,   // kind
       ExecutionTier::kNone,      // tier
-      kNoDebugging}};            // for_debugging
+      kNotForDebugging}};        // for_debugging
   new_code->MaybePrint();
   new_code->Validate();
 
@@ -1344,20 +1344,10 @@ WasmCode* NativeModule::PublishCodeLocked(
   // code table of jump table). Otherwise, install code if it was compiled
   // with a higher tier.
   static_assert(
-      kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
+      kForDebugging > kNotForDebugging && kWithBreakpoints > kForDebugging,
       "for_debugging is ordered");
-  const bool update_code_table =
-      // Never install stepping code.
-      code->for_debugging() != kForStepping &&
-      (!prior_code ||
-       (tiering_state_ == kTieredDown
-            // Tiered down: Install breakpoints over normal debug code.
-            ? prior_code->for_debugging() <= code->for_debugging()
-            // Tiered up: Install if the tier is higher than before or we
-            // replace debugging code with non-debugging code.
-            : (prior_code->tier() < code->tier() ||
-               (prior_code->for_debugging() && !code->for_debugging()))));
-  if (update_code_table) {
+
+  if (should_update_code_table(code, prior_code)) {
     code_table_[slot_idx] = code;
     if (prior_code) {
       WasmCodeRefScope::AddRef(prior_code);
@@ -1377,6 +1367,32 @@ WasmCode* NativeModule::PublishCodeLocked(
   return code;
 }
 
+bool NativeModule::should_update_code_table(WasmCode* new_code,
+                                            WasmCode* prior_code) const {
+  if (new_code->for_debugging() == kForStepping) {
+    // Never install stepping code.
+    return false;
+  }
+  if (debug_state_ == kDebugging) {
+    if (new_code->for_debugging() == kNotForDebugging) {
+      // In debug state, only install debug code.
+      return false;
+    }
+    if (prior_code && prior_code->for_debugging() > new_code->for_debugging()) {
+      // In debug state, install breakpoints over normal debug code.
+      return false;
+    }
+  }
+  // In kNoDebugging:
+  // Install if the tier is higher than before or we replace debugging code with
+  // non-debugging code.
+  if (prior_code && !prior_code->for_debugging() &&
+      prior_code->tier() > new_code->tier()) {
+    return false;
+  }
+  return true;
+}
+
 void NativeModule::ReinstallDebugCode(WasmCode* code) {
   base::RecursiveMutexGuard lock(&allocation_mutex_);
 
@@ -1387,7 +1403,7 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
   DCHECK_LT(code->index(), num_functions());
 
   // If the module is tiered up by now, do not reinstall debug code.
-  if (tiering_state_ != kTieredDown) return;
+  if (debug_state_ != kDebugging) return;
 
   uint32_t slot_idx = declared_function_index(module(), code->index());
   if (WasmCode* prior_code = code_table_[slot_idx]) {
@@ -1422,13 +1438,13 @@ std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
     base::Vector<const byte> reloc_info,
     base::Vector<const byte> source_position_table, WasmCode::Kind kind,
     ExecutionTier tier) {
-  UpdateCodeSize(instructions.size(), tier, kNoDebugging);
+  UpdateCodeSize(instructions.size(), tier, kNotForDebugging);
 
   return std::unique_ptr<WasmCode>{new WasmCode{
       this, index, instructions, stack_slots, tagged_parameter_slots,
       safepoint_table_offset, handler_table_offset, constant_pool_offset,
       code_comments_offset, unpadded_binary_size, protected_instructions_data,
-      reloc_info, source_position_table, kind, tier, kNoDebugging}};
+      reloc_info, source_position_table, kind, tier, kNotForDebugging}};
 }
 
 std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
@@ -1506,7 +1522,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
   base::Vector<uint8_t> code_space =
       code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
   DCHECK(!code_space.empty());
-  UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
+  UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNotForDebugging);
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{
       new WasmCode{this,  // native_module
@@ -1524,13 +1540,13 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
                    {},                     // source_pos
                    WasmCode::kJumpTable,   // kind
                    ExecutionTier::kNone,   // tier
-                   kNoDebugging}};         // for_debugging
+                   kNotForDebugging}};     // for_debugging
   return PublishCodeLocked(std::move(code));
 }
 
 void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
                                   ForDebugging for_debugging) {
-  if (for_debugging != kNoDebugging) return;
+  if (for_debugging != kNotForDebugging) return;
   // Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
   // this is shared code.
   if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
@@ -2401,17 +2417,12 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
   return generated_code;
 }
 
-void NativeModule::SetTieringState(TieringState new_tiering_state) {
+void NativeModule::SetDebugState(DebugState new_debug_state) {
   // Do not tier down asm.js (just never change the tiering state).
   if (module()->origin != kWasmOrigin) return;
 
   base::RecursiveMutexGuard lock(&allocation_mutex_);
-  tiering_state_ = new_tiering_state;
-}
-
-bool NativeModule::IsTieredDown() {
-  base::RecursiveMutexGuard lock(&allocation_mutex_);
-  return tiering_state_ == kTieredDown;
+  debug_state_ = new_debug_state;
 }
 
 void NativeModule::RemoveAllCompiledCode() {
@@ -2426,75 +2437,6 @@ void NativeModule::RemoveAllCompiledCode() {
   }
 }
 
-void NativeModule::RecompileForTiering() {
-  // If baseline compilation is not finished yet, we do not tier down now. This
-  // would be tricky because not all code is guaranteed to be available yet.
-  // Instead, we tier down after streaming compilation finished.
-  if (!compilation_state_->baseline_compilation_finished()) return;
-
-  // Read the tiering state under the lock, then trigger recompilation after
-  // releasing the lock. If the tiering state was changed when the triggered
-  // compilation units finish, code installation will handle that correctly.
-  TieringState current_state;
-  {
-    base::RecursiveMutexGuard lock(&allocation_mutex_);
-    current_state = tiering_state_;
-
-    // Initialize {cached_code_} to signal that this cache should get filled
-    // from now on.
-    if (!cached_code_) {
-      cached_code_ = std::make_unique<
-          std::map<std::pair<ExecutionTier, int>, WasmCode*>>();
-      // Fill with existing code.
-      for (auto& code_entry : owned_code_) {
-        InsertToCodeCache(code_entry.second.get());
-      }
-    }
-  }
-  RecompileNativeModule(this, current_state);
-}
-
-std::vector<int> NativeModule::FindFunctionsToRecompile(
-    TieringState new_tiering_state) {
-  WasmCodeRefScope code_ref_scope;
-  base::RecursiveMutexGuard guard(&allocation_mutex_);
-  // Get writable permission already here (and not inside the loop in
-  // {PatchJumpTablesLocked}), to avoid switching for each slot individually.
-  CodeSpaceWriteScope code_space_write_scope(this);
-  std::vector<int> function_indexes;
-  int imported = module()->num_imported_functions;
-  int declared = module()->num_declared_functions;
-  const bool tier_down = new_tiering_state == kTieredDown;
-  for (int slot_index = 0; slot_index < declared; ++slot_index) {
-    int function_index = imported + slot_index;
-    WasmCode* old_code = code_table_[slot_index];
-    bool code_is_good =
-        tier_down ? old_code && old_code->for_debugging()
-                  : old_code && old_code->tier() == ExecutionTier::kTurbofan;
-    if (code_is_good) continue;
-    DCHECK_NOT_NULL(cached_code_);
-    auto cache_it = cached_code_->find(std::make_pair(
-        tier_down ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan,
-        function_index));
-    if (cache_it != cached_code_->end()) {
-      WasmCode* cached_code = cache_it->second;
-      if (old_code) {
-        WasmCodeRefScope::AddRef(old_code);
-        // The code is added to the current {WasmCodeRefScope}, hence the ref
-        // count cannot drop to zero here.
-        old_code->DecRefOnLiveCode();
-      }
-      code_table_[slot_index] = cached_code;
-      PatchJumpTablesLocked(slot_index, cached_code->instruction_start());
-      cached_code->IncRef();
-      continue;
-    }
-    // Otherwise add the function to the set of functions to recompile.
-    function_indexes.push_back(function_index);
-  }
-  return function_indexes;
-}
-
 void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
   base::RecursiveMutexGuard guard(&allocation_mutex_);
   // Free the code space.
@@ -823,29 +823,20 @@ class V8_EXPORT_PRIVATE NativeModule final {
   V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
       base::Vector<WasmCompilationResult>);
 
-  // Set a new tiering state, but don't trigger any recompilation yet; use
-  // {RecompileForTiering} for that. The two steps are split because In some
-  // scenarios we need to drop locks before triggering recompilation.
-  void SetTieringState(TieringState);
+  // Set a new debugging state, but don't trigger any recompilation;
+  // recompilation happens lazily.
+  void SetDebugState(DebugState);
 
-  // Check whether this modules is tiered down for debugging.
-  bool IsTieredDown();
+  // Check whether this modules is in debug state.
+  DebugState IsInDebugState() const {
+    base::RecursiveMutexGuard lock(&allocation_mutex_);
+    return debug_state_;
+  }
 
   // Remove all compiled code from the {NativeModule} and replace it with
   // {CompileLazy} builtins.
   void RemoveAllCompiledCode();
 
-  // Fully recompile this module in the tier set previously via
-  // {SetTieringState}. The calling thread contributes to compilation and only
-  // returns once recompilation is done.
-  void RecompileForTiering();
-
-  // Find all functions that need to be recompiled for a new tier. Note that
-  // compilation jobs might run concurrently, so this method only considers the
-  // compilation state of this native module at the time of the call.
-  // Returns a vector of function indexes to recompile.
-  std::vector<int> FindFunctionsToRecompile(TieringState);
-
   // Free a set of functions of this module. Uncommits whole pages if possible.
   // The given vector must be ordered by the instruction start address, and all
   // {WasmCode} objects must not be used any more.
@@ -923,6 +914,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // not have code in the cache yet.
   void InsertToCodeCache(WasmCode* code);
 
+  bool should_update_code_table(WasmCode* new_code, WasmCode* prior_code) const;
+
   // -- Fields of {NativeModule} start here.
 
   // Keep the engine alive as long as this NativeModule is alive. In its
@@ -1012,7 +1005,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
 
   std::unique_ptr<NamesProvider> names_provider_;
 
-  TieringState tiering_state_ = kTieredUp;
+  DebugState debug_state_ = kNotDebugging;
 
   // Cache both baseline and top-tier code if we are debugging, to speed up
   // repeated enabling/disabling of the debugger or profiler.
@@ -390,8 +390,8 @@ struct WasmEngine::IsolateInfo {
 
   const std::shared_ptr<Counters> async_counters;
 
-  // Keep new modules in tiered down state.
-  bool keep_tiered_down = false;
+  // Keep new modules in debug state.
+  bool keep_in_debug_state = false;
 
   // Keep track whether we already added a sample for PKU support (we only want
   // one sample per Isolate).
@@ -714,22 +714,30 @@ void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
       &native_module->module()->functions[function_index], tier);
 }
 
-void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
-  std::vector<std::shared_ptr<NativeModule>> native_modules;
+void WasmEngine::EnterDebuggingForIsolate(Isolate* isolate) {
+  std::vector<std::pair<std::shared_ptr<NativeModule>, DebugState>>
+      native_modules;
+  // {mutex_} gets taken both here and in {RemoveAllCompiledCode} in
+  // {AddPotentiallyDeadCode}. Therefore {RemoveAllCompiledCode} has to be
+  // called outside the lock.
   {
     base::MutexGuard lock(&mutex_);
-    if (isolates_[isolate]->keep_tiered_down) return;
-    isolates_[isolate]->keep_tiered_down = true;
+    if (isolates_[isolate]->keep_in_debug_state) return;
+    isolates_[isolate]->keep_in_debug_state = true;
     for (auto* native_module : isolates_[isolate]->native_modules) {
-      native_module->SetTieringState(kTieredDown);
       DCHECK_EQ(1, native_modules_.count(native_module));
       if (auto shared_ptr = native_modules_[native_module]->weak_ptr.lock()) {
-        native_modules.emplace_back(std::move(shared_ptr));
+        native_modules.emplace_back(std::make_pair(
+            std::move(shared_ptr), native_module->IsInDebugState()));
      }
+      native_module->SetDebugState(kDebugging);
    }
  }
-  for (auto& native_module : native_modules) {
-    native_module->RecompileForTiering();
+  for (auto& pair : native_modules) {
+    DebugState is_in_debug_state = pair.second;
+    if (!is_in_debug_state) {
+      pair.first->RemoveAllCompiledCode();
+    }
   }
 }
 
@@ -740,12 +748,12 @@ void WasmEngine::LeaveDebuggingForIsolate(Isolate* isolate) {
   std::vector<std::pair<std::shared_ptr<NativeModule>, bool>> native_modules;
   {
     base::MutexGuard lock(&mutex_);
-    isolates_[isolate]->keep_tiered_down = false;
+    isolates_[isolate]->keep_in_debug_state = false;
     auto can_remove_debug_code = [this](NativeModule* native_module) {
       DCHECK_EQ(1, native_modules_.count(native_module));
       for (auto* isolate : native_modules_[native_module]->isolates) {
         DCHECK_EQ(1, isolates_.count(isolate));
-        if (isolates_[isolate]->keep_tiered_down) return false;
+        if (isolates_[isolate]->keep_in_debug_state) return false;
       }
       return true;
     };
@@ -753,11 +761,11 @@ void WasmEngine::LeaveDebuggingForIsolate(Isolate* isolate) {
       DCHECK_EQ(1, native_modules_.count(native_module));
       auto shared_ptr = native_modules_[native_module]->weak_ptr.lock();
       if (!shared_ptr) continue;  // The module is not used any more.
-      if (!native_module->IsTieredDown()) continue;
+      if (!native_module->IsInDebugState()) continue;
       // Only start tier-up if no other isolate needs this module in tiered
       // down state.
       bool remove_debug_code = can_remove_debug_code(native_module);
-      if (remove_debug_code) native_module->SetTieringState(kTieredUp);
+      if (remove_debug_code) native_module->SetDebugState(kNotDebugging);
       native_modules.emplace_back(std::move(shared_ptr), remove_debug_code);
     }
   }
@@ -768,7 +776,9 @@ void WasmEngine::LeaveDebuggingForIsolate(Isolate* isolate) {
     if (native_module->HasDebugInfo()) {
       native_module->GetDebugInfo()->RemoveIsolate(isolate);
     }
-    if (remove_debug_code) native_module->RemoveAllCompiledCode();
+    if (remove_debug_code) {
+      native_module->RemoveAllCompiledCode();
+    }
   }
 }
 
@@ -1205,8 +1215,8 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
   pair.first->second.get()->isolates.insert(isolate);
   auto* isolate_info = isolates_[isolate].get();
   isolate_info->native_modules.insert(native_module.get());
-  if (isolate_info->keep_tiered_down) {
-    native_module->SetTieringState(kTieredDown);
+  if (isolate_info->keep_in_debug_state) {
+    native_module->SetDebugState(kDebugging);
   }
 
   // Record memory protection key support.
@@ -1232,7 +1242,7 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
                wire_bytes.size());
   std::shared_ptr<NativeModule> native_module =
       native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
-  bool recompile_module = false;
+  bool remove_all_code = false;
   if (native_module) {
     TRACE_EVENT0("v8.wasm", "CacheHit");
     base::MutexGuard guard(&mutex_);
@@ -1242,13 +1252,15 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
     }
     native_module_info->isolates.insert(isolate);
     isolates_[isolate]->native_modules.insert(native_module.get());
-    if (isolates_[isolate]->keep_tiered_down) {
-      native_module->SetTieringState(kTieredDown);
-      recompile_module = true;
+    if (isolates_[isolate]->keep_in_debug_state &&
+        !native_module->IsInDebugState()) {
+      remove_all_code = true;
+      native_module->SetDebugState(kDebugging);
     }
   }
-  // Potentially recompile the module for tier down, after releasing the mutex.
-  if (recompile_module) native_module->RecompileForTiering();
+  if (remove_all_code) {
+    native_module->RemoveAllCompiledCode();
+  }
   return native_module;
 }
 
@@ -1261,21 +1273,22 @@ std::shared_ptr<NativeModule> WasmEngine::UpdateNativeModuleCache(
   native_module =
       native_module_cache_.Update(std::move(native_module), has_error);
   if (prev == native_module.get()) return native_module;
 
-  bool recompile_module = false;
+  bool remove_all_code = false;
   {
     base::MutexGuard guard(&mutex_);
     DCHECK_EQ(1, native_modules_.count(native_module.get()));
     native_modules_[native_module.get()]->isolates.insert(isolate);
     DCHECK_EQ(1, isolates_.count(isolate));
     isolates_[isolate]->native_modules.insert(native_module.get());
-    if (isolates_[isolate]->keep_tiered_down) {
-      native_module->SetTieringState(kTieredDown);
-      recompile_module = true;
+    if (isolates_[isolate]->keep_in_debug_state &&
+        !native_module->IsInDebugState()) {
+      remove_all_code = true;
+      native_module->SetDebugState(kDebugging);
     }
   }
-  // Potentially recompile the module for tier down, after releasing the mutex.
-  if (recompile_module) native_module->RecompileForTiering();
+  if (remove_all_code) {
+    native_module->RemoveAllCompiledCode();
+  }
   return native_module;
 }
 
@@ -204,7 +204,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
   void CompileFunction(Isolate* isolate, NativeModule* native_module,
                        uint32_t function_index, ExecutionTier tier);
 
-  void TierDownAllModulesPerIsolate(Isolate* isolate);
+  void EnterDebuggingForIsolate(Isolate* isolate);
 
   void LeaveDebuggingForIsolate(Isolate* isolate);
 
@@ -1446,7 +1446,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
       result.tagged_parameter_slots,
       result.protected_instructions_data.as_vector(),
       result.source_positions.as_vector(), GetCodeKind(result),
-      wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+      wasm::ExecutionTier::kNone, wasm::kNotForDebugging);
   wasm::WasmCode* published_code =
       native_module->PublishCode(std::move(wasm_code));
   isolate->counters()->wasm_generated_code_size()->Increment(
@@ -907,7 +907,7 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
       isolate, enabled_features, std::move(module), code_size_estimate);
   // We have to assign a compilation ID here, as it is required for a
   // potential re-compilation, e.g. triggered by
-  // {TierDownAllModulesPerIsolate}. The value is -2 so that it is different
+  // {EnterDebuggingForIsolate}. The value is -2 so that it is different
   // than the compilation ID of actual compilations, and also different than
   // the sentinel value of the CompilationState.
   shared_native_module->compilation_state()->set_compilation_id(-2);
@@ -37,13 +37,13 @@ inline const char* ExecutionTierToString(ExecutionTier tier) {
 // the code also contains breakpoints, and {kForStepping} for code that is
 // flooded with breakpoints.
 enum ForDebugging : int8_t {
-  kNoDebugging = 0,
+  kNotForDebugging = 0,
   kForDebugging,
   kWithBreakpoints,
   kForStepping
 };
 
-enum TieringState : int8_t { kTieredUp, kTieredDown };
+enum DebugState : bool { kNotDebugging = false, kDebugging = true };
 
 }  // namespace wasm
 }  // namespace internal
@@ -26,7 +26,7 @@ class LiftoffCompileEnvironment {
     // Add a table of length 1, for indirect calls.
     wasm_runner_.builder().AddIndirectFunctionTable(nullptr, 1);
     // Set tiered down such that we generate debugging code.
-    wasm_runner_.builder().SetTieredDown();
+    wasm_runner_.builder().SetDebugState();
   }
 
   struct TestFunction {
@@ -3954,7 +3954,7 @@ TEST(Liftoff_tier_up) {
     CodeSpaceWriteScope write_scope(native_module);
     std::unique_ptr<WasmCode> new_code = native_module->AddCode(
         add.function_index(), desc, 0, 0, {}, {}, WasmCode::kWasmFunction,
-        ExecutionTier::kTurbofan, kNoDebugging);
+        ExecutionTier::kTurbofan, kNotForDebugging);
     native_module->PublishCode(std::move(new_code));
   }
 
@@ -1601,7 +1601,7 @@ STREAM_TEST(TierDownWithError) {
     builder.WriteTo(&buffer);
   }
 
-  GetWasmEngine()->TierDownAllModulesPerIsolate(i_isolate);
+  GetWasmEngine()->EnterDebuggingForIsolate(i_isolate);
 
   tester.OnBytesReceived(buffer.begin(), buffer.size());
   tester.FinishStream();
@@ -140,7 +140,7 @@ class BreakHandler : public debug::DebugDelegate {
 Handle<BreakPoint> SetBreakpoint(WasmRunnerBase* runner, int function_index,
                                  int byte_offset,
                                  int expected_set_byte_offset = -1) {
-  runner->TierDown();
+  runner->SwitchToDebug();
   int func_offset =
       runner->builder().GetFunctionAt(function_index)->code.offset();
   int code_offset = func_offset + byte_offset;
@@ -370,7 +370,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
 
 WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
   WasmRunner<int, int> runner(execution_tier);
-  runner.TierDown();
+  runner.SwitchToDebug();
   WasmFunctionCompiler& f2 = runner.NewFunction<void>();
   f2.AllocateLocal(kWasmI32);
 
@@ -342,10 +342,11 @@ TEST(TierDownAfterDeserialization) {
   CHECK_NOT_NULL(turbofan_code);
   CHECK_EQ(ExecutionTier::kTurbofan, turbofan_code->tier());
 
-  GetWasmEngine()->TierDownAllModulesPerIsolate(isolate);
+  GetWasmEngine()->EnterDebuggingForIsolate(isolate);
 
-  auto* liftoff_code = native_module->GetCode(0);
-  CHECK_EQ(ExecutionTier::kLiftoff, liftoff_code->tier());
+  // Entering debugging should delete all code, so that debug code gets compiled
+  // lazily.
+  CHECK_NULL(native_module->GetCode(0));
 }
 
 TEST(SerializeLiftoffModuleFails) {
@@ -616,7 +616,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
   NativeModule* native_module =
       builder_->instance_object()->module_object().native_module();
   ForDebugging for_debugging =
-      native_module->IsTieredDown() ? kForDebugging : kNoDebugging;
+      native_module->IsInDebugState() ? kForDebugging : kNotForDebugging;
 
   base::Optional<WasmCompilationResult> result;
   if (builder_->test_execution_tier() ==
@@ -242,14 +242,14 @@ class TestingModuleBuilder {
     return reinterpret_cast<Address>(globals_data_);
   }
 
-  void SetTieredDown() {
-    native_module_->SetTieringState(kTieredDown);
+  void SetDebugState() {
+    native_module_->SetDebugState(kDebugging);
     execution_tier_ = TestExecutionTier::kLiftoff;
   }
 
-  void TierDown() {
-    SetTieredDown();
-    native_module_->RecompileForTiering();
+  void SwitchToDebug() {
+    SetDebugState();
+    native_module_->RemoveAllCompiledCode();
   }
 
   CompilationEnv CreateCompilationEnv();
@@ -469,7 +469,7 @@ class WasmRunnerBase : public InitializedHandleScope {
 
   bool interpret() { return builder_.interpret(); }
 
-  void TierDown() { builder_.TierDown(); }
+  void SwitchToDebug() { builder_.SwitchToDebug(); }
 
   template <typename ReturnType, typename... ParamTypes>
   FunctionSig* CreateSig() {
@@ -17,19 +17,21 @@ function create_builder(delta = 0) {
   return builder;
 }
 
-function checkTieredDown(instance) {
+function checkDebugCode(instance) {
   for (let i = 0; i < num_functions; ++i) {
-    assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
+    // Call the function once because of lazy compilation.
+    instance.exports['f' + i]();
+    assertTrue(%IsWasmDebugFunction(instance.exports['f' + i]));
   }
 }
 
-function waitForTieredUp(instance) {
-  // Busy waiting until all functions are tiered up.
+function waitForNoDebugCode(instance) {
+  // Busy waiting until all functions left debug mode.
   let num_liftoff_functions = 0;
   while (true) {
     num_liftoff_functions = 0;
     for (let i = 0; i < num_functions; ++i) {
-      if (%IsLiftoffFunction(instance.exports['f' + i])) {
+      if (%IsWasmDebugFunction(instance.exports['f' + i])) {
        num_liftoff_functions++;
      }
    }
@@ -39,37 +41,37 @@ function waitForTieredUp(instance) {
 
 const Debug = new DebugWrapper();
 
-(function testTierDownToLiftoff() {
+(function testEnterDebugMode() {
   // In the 'isolates' test, this test runs in parallel to itself on two
   // isolates. All checks below should still hold.
   const instance = create_builder(0).instantiate();
   Debug.enable();
-  checkTieredDown(instance);
+  checkDebugCode(instance);
   const instance2 = create_builder(1).instantiate();
-  checkTieredDown(instance2);
+  checkDebugCode(instance2);
   Debug.disable();
-  // Eventually the instances will be completely tiered up again.
-  waitForTieredUp(instance);
-  waitForTieredUp(instance2);
+  // Eventually the instances will have completely left debug mode again.
+  waitForNoDebugCode(instance);
+  waitForNoDebugCode(instance2);
 })();
 
 // Test async compilation.
-assertPromiseResult((async function testTierDownToLiftoffAsync() {
+assertPromiseResult((async function testEnterDebugModeAsync() {
   // First test: enable the debugger *after* compiling the module.
   const instance = await create_builder(2).asyncInstantiate();
   Debug.enable();
-  checkTieredDown(instance);
+  checkDebugCode(instance);
   const instance2 = await create_builder(3).asyncInstantiate();
-  checkTieredDown(instance2);
+  checkDebugCode(instance2);
   Debug.disable();
-  waitForTieredUp(instance);
-  waitForTieredUp(instance2);
+  waitForNoDebugCode(instance);
+  waitForNoDebugCode(instance2);
 
   // Second test: enable the debugger *while* compiling the module.
   const instancePromise = create_builder(4).asyncInstantiate();
   Debug.enable();
   const instance3 = await instancePromise;
-  checkTieredDown(instance3);
+  checkDebugCode(instance3);
   Debug.disable();
-  waitForTieredUp(instance3);
+  waitForNoDebugCode(instance3);
 })());
@@ -1338,7 +1338,7 @@
   'wasm/liftoff': [SKIP],
   'wasm/liftoff-debug': [SKIP],
   'wasm/tier-up-testing-flag': [SKIP],
-  'wasm/tier-down-to-liftoff': [SKIP],
+  'wasm/enter-debug-state': [SKIP],
   'wasm/wasm-dynamic-tiering': [SKIP],
   'wasm/test-partial-serialization': [SKIP],
   'regress/wasm/regress-1248024': [SKIP],
@@ -18,20 +18,22 @@ function create_builder(delta = 0) {
   return builder;
 }
 
-function checkTieredDown(instance) {
+function checkForDebugCode(instance) {
   for (let i = 0; i < num_functions; ++i) {
-    assertTrue(%IsLiftoffFunction(instance.exports['f' + i]));
+    // Call the function once because of lazy compilation.
+    instance.exports['f' + i]();
+    assertTrue(%IsWasmDebugFunction(instance.exports['f' + i]));
   }
 }
 
 function check(instance) {
-  %WasmTierDown();
-  checkTieredDown(instance);
+  %WasmEnterDebugging();
+  checkForDebugCode(instance);
 
   for (let i = 0; i < num_functions; ++i) {
     %WasmTierUpFunction(instance, i);
   }
-  checkTieredDown(instance);
+  checkForDebugCode(instance);
 }
 
 (function testTierDownToLiftoff() {