[compiler-dispatcher] Enqueue tasks for non-eager inner funcs

Add support for compiling non-eager, non-top-level inner functions in
parallel, using the compiler dispatcher. This behaviour can be enabled
with --parallel-compile-tasks-for-lazy.

There are a couple of consequences:

  * To support this we need support for off-thread ScopeInfo
    deserialization, so this adds that too.
  * The previous --parallel-compile-tasks flag is renamed to the more
    descriptive --parallel-compile-tasks-for-eager-toplevel.
  * Both parallel-compile-tasks flags are moved onto
    UnoptimizedCompileFlags so that they can be enabled/disabled on a
    per-compile basis (e.g. enabled for streaming, disabled for
    re-parsing).
  * asm.js compilations can now happen without an active Context (in
    the compiler dispatcher's idle finalization) so we can't get a
    ContextId for metric reporting; we'd need to somehow fix this if we
    wanted asm.js UKM but for now it's probably fine.
  * Took the opportunity to clean up some of the "can preparse" logic in
    the parser.

Change-Id: I20b1ec6a6bacfe268808edc8d812b92370c5840d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3281924
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Emanuel Ziegler <ecmziegler@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78183}
This commit is contained in:
Leszek Swirski 2021-12-01 13:31:16 +01:00 committed by V8 LUCI CQ
parent fc563c8708
commit 5ab1ec1e06
22 changed files with 236 additions and 123 deletions

View File

@ -174,7 +174,7 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
DeclareThis(avfactory);
}
ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), MODULE_SCOPE, avfactory, scope_info),
module_descriptor_(nullptr) {
@ -188,7 +188,8 @@ ClassScope::ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous)
set_language_mode(LanguageMode::kStrict);
}
ClassScope::ClassScope(Isolate* isolate, Zone* zone,
template <typename IsolateT>
ClassScope::ClassScope(IsolateT* isolate, Zone* zone,
AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info)
: Scope(zone, CLASS_SCOPE, ast_value_factory, scope_info),
@ -222,6 +223,12 @@ ClassScope::ClassScope(Isolate* isolate, Zone* zone,
Context::MIN_CONTEXT_SLOTS + index);
}
}
template ClassScope::ClassScope(Isolate* isolate, Zone* zone,
AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info);
template ClassScope::ClassScope(LocalIsolate* isolate, Zone* zone,
AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info);
Scope::Scope(Zone* zone, ScopeType scope_type,
AstValueFactory* ast_value_factory, Handle<ScopeInfo> scope_info)
@ -398,7 +405,8 @@ bool Scope::ContainsAsmModule() const {
}
#endif // V8_ENABLE_WEBASSEMBLY
Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
template <typename IsolateT>
Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone,
ScopeInfo scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
@ -454,7 +462,7 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
handle(scope_info, isolate));
}
} else if (scope_info.scope_type() == MODULE_SCOPE) {
outer_scope = zone->New<ModuleScope>(isolate, handle(scope_info, isolate),
outer_scope = zone->New<ModuleScope>(handle(scope_info, isolate),
ast_value_factory);
} else {
DCHECK_EQ(scope_info.scope_type(), CATCH_SCOPE);
@ -502,6 +510,17 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
return innermost_scope;
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Scope* Scope::DeserializeScopeChain(
Isolate* isolate, Zone* zone, ScopeInfo scope_info,
DeclarationScope* script_scope, AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Scope* Scope::DeserializeScopeChain(
LocalIsolate* isolate, Zone* zone, ScopeInfo scope_info,
DeclarationScope* script_scope, AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
DeclarationScope* Scope::AsDeclarationScope() {
DCHECK(is_declaration_scope());
return static_cast<DeclarationScope*>(this);

View File

@ -163,7 +163,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
static Scope* DeserializeScopeChain(IsolateT* isolate, Zone* zone,
ScopeInfo scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
@ -1363,8 +1365,7 @@ class ModuleScope final : public DeclarationScope {
ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory);
// Deserialization. Does not restore the module descriptor.
ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory);
ModuleScope(Handle<ScopeInfo> scope_info, AstValueFactory* avfactory);
// Returns nullptr in a deserialized scope.
SourceTextModuleDescriptor* module() const { return module_descriptor_; }
@ -1381,7 +1382,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
public:
ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous);
// Deserialization.
ClassScope(Isolate* isolate, Zone* zone, AstValueFactory* ast_value_factory,
template <typename IsolateT>
ClassScope(IsolateT* isolate, Zone* zone, AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info);
struct HeritageParsingScope {

View File

@ -1406,14 +1406,17 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
BackgroundCompileTask::BackgroundCompileTask(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
const UnoptimizedCompileState* compile_state,
std::unique_ptr<Utf16CharacterStream> character_stream,
ProducedPreparseData* preparse_data,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
TimedHistogram* timer, int max_stack_size)
: isolate_for_local_isolate_(isolate),
// TODO(leszeks): Create this from parent compile flags, to avoid
// accessing the Isolate.
flags_(
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info)),
compile_state_(isolate),
compile_state_(*compile_state),
info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
stack_size_(max_stack_size),
worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
@ -1530,7 +1533,23 @@ void BackgroundCompileTask::Run() {
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
Parser parser(&isolate, info_.get(), script_);
if (flags().is_toplevel()) {
parser.InitializeEmptyScopeChain(info_.get());
} else {
// TODO(leszeks): Consider keeping Scope zones alive between compile tasks
// and passing the Scope for the FunctionLiteral through here directly
// without copying/deserializing.
Handle<SharedFunctionInfo> shared_info =
input_shared_info_.ToHandleChecked();
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
if (shared_info->HasOuterScopeInfo()) {
maybe_outer_scope_info =
handle(shared_info->GetOuterScopeInfo(), &isolate);
}
parser.DeserializeScopeChain(
&isolate, info_.get(), maybe_outer_scope_info,
Scope::DeserializationMode::kIncludingVariables);
}
parser.ParseOnBackground(&isolate, info_.get(), start_position_,
end_position_, function_literal_id_);
@ -1760,6 +1779,9 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
flags.set_collect_source_positions(true);
// Prevent parallel tasks from being spawned by this job.
flags.set_post_parallel_compile_tasks_for_eager_toplevel(false);
flags.set_post_parallel_compile_tasks_for_lazy(false);
UnoptimizedCompileState compile_state(isolate);
ParseInfo parse_info(isolate, flags, &compile_state);

View File

@ -511,6 +511,7 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
// Compiler::FinalizeBackgroundCompileTask.
BackgroundCompileTask(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
const UnoptimizedCompileState* compile_state,
std::unique_ptr<Utf16CharacterStream> character_stream,
ProducedPreparseData* preparse_data,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,

View File

@ -79,6 +79,7 @@ LazyCompileDispatcher::~LazyCompileDispatcher() {
void LazyCompileDispatcher::Enqueue(
LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
const UnoptimizedCompileState* compile_state,
std::unique_ptr<Utf16CharacterStream> character_stream,
ProducedPreparseData* preparse_data) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
@ -87,9 +88,9 @@ void LazyCompileDispatcher::Enqueue(
std::unique_ptr<Job> job =
std::make_unique<Job>(std::make_unique<BackgroundCompileTask>(
isolate_, shared_info, std::move(character_stream), preparse_data,
worker_thread_runtime_call_stats_, background_compile_timer_,
static_cast<int>(max_stack_size_)));
isolate_, shared_info, compile_state, std::move(character_stream),
preparse_data, worker_thread_runtime_call_stats_,
background_compile_timer_, static_cast<int>(max_stack_size_)));
// Post a a background worker task to perform the compilation on the worker
// thread.
@ -208,6 +209,7 @@ void LazyCompileDispatcher::AbortAll() {
{
base::MutexGuard lock(&mutex_);
{
SharedToJobMap::IteratableScope iteratable_scope(
&shared_to_unoptimized_job_);
for (Job** job_entry : iteratable_scope) {
@ -218,6 +220,7 @@ void LazyCompileDispatcher::AbortAll() {
}
}
shared_to_unoptimized_job_.Clear();
}
}
LazyCompileDispatcher::Job* LazyCompileDispatcher::GetJobFor(
@ -311,6 +314,7 @@ void LazyCompileDispatcher::DoIdleWork(double deadline_in_seconds) {
Job* job;
{
base::MutexGuard lock(&mutex_);
{
SharedToJobMap::IteratableScope iteratable_scope(
&shared_to_unoptimized_job_);
@ -327,11 +331,18 @@ void LazyCompileDispatcher::DoIdleWork(double deadline_in_seconds) {
// Since we hold the lock here, we can be sure no jobs have become ready
// for finalization while we looped through the list.
if (it == end) return;
}
DCHECK_EQ(pending_background_jobs_.find(job),
pending_background_jobs_.end());
}
shared_to_unoptimized_job_.Delete(function, &job);
}
if (trace_compiler_dispatcher_) {
PrintF("LazyCompileDispatcher: idle finalizing job for ");
function.ShortPrint();
PrintF("\n");
}
if (job->state == Job::State::kReadyToFinalize) {
HandleScope scope(isolate_);

View File

@ -34,6 +34,7 @@ class AstValueFactory;
class BackgroundCompileTask;
class CancelableTaskManager;
class UnoptimizedCompileJob;
class UnoptimizedCompileState;
class FunctionLiteral;
class Isolate;
class ParseInfo;
@ -84,6 +85,7 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
~LazyCompileDispatcher();
void Enqueue(LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
const UnoptimizedCompileState* compile_state,
std::unique_ptr<Utf16CharacterStream> character_stream,
ProducedPreparseData* preparse_data);

View File

@ -440,7 +440,8 @@ DEFINE_NEG_IMPLICATION(enable_third_party_heap, turbo_allocation_folding)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_inlining)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, script_streaming)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, parallel_compile_tasks)
DEFINE_NEG_IMPLICATION(enable_third_party_heap,
parallel_compile_tasks_for_eager_toplevel)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, use_marking_progress_bar)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, move_object_start)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_marking)
@ -1566,11 +1567,17 @@ DEFINE_BOOL(compilation_cache, true, "enable compilation cache")
DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
// lazy-compile-dispatcher.cc
DEFINE_BOOL(parallel_compile_tasks, false, "enable parallel compile tasks")
DEFINE_BOOL(lazy_compile_dispatcher, false, "enable compiler dispatcher")
DEFINE_IMPLICATION(parallel_compile_tasks, lazy_compile_dispatcher)
DEFINE_BOOL(trace_compiler_dispatcher, false,
"trace compiler dispatcher activity")
DEFINE_BOOL(
parallel_compile_tasks_for_eager_toplevel, false,
"spawn parallel compile tasks for eagerly compiled, top-level functions")
DEFINE_IMPLICATION(parallel_compile_tasks_for_eager_toplevel,
lazy_compile_dispatcher)
DEFINE_BOOL(parallel_compile_tasks_for_lazy, false,
"spawn parallel compile tasks for all lazily compiled functions")
DEFINE_IMPLICATION(parallel_compile_tasks_for_lazy, lazy_compile_dispatcher)
// cpu-profiler.cc
DEFINE_INT(cpu_profiler_sampling_interval, 1000,
@ -2160,9 +2167,10 @@ DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
// before. Audit them, and remove any unneeded implications.
DEFINE_IMPLICATION(predictable, single_threaded_gc)
DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(predictable, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(predictable, parallel_compile_tasks)
DEFINE_NEG_IMPLICATION(predictable, stress_concurrent_inlining)
DEFINE_NEG_IMPLICATION(predictable, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(predictable, parallel_compile_tasks_for_eager_toplevel)
DEFINE_NEG_IMPLICATION(predictable, parallel_compile_tasks_for_lazy)
DEFINE_BOOL(predictable_gc_schedule, false,
"Predictable garbage collection schedule. Fixes heap growing, "
@ -2179,9 +2187,11 @@ DEFINE_NEG_IMPLICATION(predictable_gc_schedule, memory_reducer)
DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
DEFINE_IMPLICATION(single_threaded, single_threaded_gc)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(single_threaded, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(single_threaded, parallel_compile_tasks)
DEFINE_NEG_IMPLICATION(single_threaded, stress_concurrent_inlining)
DEFINE_NEG_IMPLICATION(single_threaded, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(single_threaded,
parallel_compile_tasks_for_eager_toplevel)
DEFINE_NEG_IMPLICATION(single_threaded, parallel_compile_tasks_for_lazy)
//
// Parallel and concurrent GC (Orinoco) related flags.

View File

@ -2522,9 +2522,11 @@ void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
// Only parallel compile when there's a script (not the case for source
// position collection).
if (!script_.is_null() && literal->should_parallel_compile()) {
// If we are already eagerly compiling this function, it must be because of
// --parallel-compile-tasks.
DCHECK_IMPLIES(!literal->ShouldEagerCompile(), FLAG_parallel_compile_tasks);
// If we should normally be eagerly compiling this function, we must be here
// because of post_parallel_compile_tasks_for_eager_toplevel.
DCHECK_IMPLIES(
literal->ShouldEagerCompile(),
info()->flags().post_parallel_compile_tasks_for_eager_toplevel());
// There exists a lazy compile dispatcher.
DCHECK(info()->state()->dispatcher());
// There exists a cloneable character stream.
@ -2541,7 +2543,8 @@ void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
shared_info =
Compiler::GetSharedFunctionInfo(literal, script_, local_isolate_);
info()->state()->dispatcher()->Enqueue(
local_isolate_, shared_info, info()->character_stream()->Clone(),
local_isolate_, shared_info, info()->state(),
info()->character_stream()->Clone(),
literal->produced_preparse_data());
}
} else if (eager_inner_literals_ && literal->ShouldEagerCompile()) {

View File

@ -32,8 +32,8 @@ RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
RuntimeCallTimerScope::RuntimeCallTimerScope(LocalIsolate* isolate,
RuntimeCallCounterId counter_id) {
DCHECK_NOT_NULL(isolate->runtime_call_stats());
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
DCHECK_NOT_NULL(isolate->runtime_call_stats());
stats_ = isolate->runtime_call_stats();
stats_->Enter(&timer_, counter_id);
}

View File

@ -37,6 +37,10 @@ UnoptimizedCompileFlags::UnoptimizedCompileFlags(Isolate* isolate,
set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
isolate->NeedsDetailedOptimizedCodeLineInfo());
set_allow_harmony_top_level_await(FLAG_harmony_top_level_await);
set_post_parallel_compile_tasks_for_eager_toplevel(
FLAG_parallel_compile_tasks_for_eager_toplevel);
set_post_parallel_compile_tasks_for_lazy(
FLAG_parallel_compile_tasks_for_lazy);
}
// static
@ -133,6 +137,8 @@ void UnoptimizedCompileFlags::SetFlagsFromFunction(T function) {
set_class_scope_has_private_brand(function->class_scope_has_private_brand());
set_has_static_private_methods_or_accessors(
function->has_static_private_methods_or_accessors());
set_private_name_lookup_skips_outer_class(
function->private_name_lookup_skips_outer_class());
set_is_toplevel(function->is_toplevel());
}

View File

@ -54,11 +54,14 @@ class Zone;
V(block_coverage_enabled, bool, 1, _) \
V(is_asm_wasm_broken, bool, 1, _) \
V(class_scope_has_private_brand, bool, 1, _) \
V(private_name_lookup_skips_outer_class, bool, 1, _) \
V(requires_instance_members_initializer, bool, 1, _) \
V(has_static_private_methods_or_accessors, bool, 1, _) \
V(might_always_opt, bool, 1, _) \
V(allow_natives_syntax, bool, 1, _) \
V(allow_lazy_compile, bool, 1, _) \
V(post_parallel_compile_tasks_for_eager_toplevel, bool, 1, _) \
V(post_parallel_compile_tasks_for_lazy, bool, 1, _) \
V(collect_source_positions, bool, 1, _) \
V(allow_harmony_top_level_await, bool, 1, _) \
V(is_repl_mode, bool, 1, _)

View File

@ -474,8 +474,9 @@ void Parser::InitializeEmptyScopeChain(ParseInfo* info) {
original_scope_ = script_scope;
}
template <typename IsolateT>
void Parser::DeserializeScopeChain(
Isolate* isolate, ParseInfo* info,
IsolateT* isolate, ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
Scope::DeserializationMode mode) {
InitializeEmptyScopeChain(info);
@ -492,6 +493,15 @@ void Parser::DeserializeScopeChain(
}
}
template void Parser::DeserializeScopeChain(
Isolate* isolate, ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
Scope::DeserializationMode mode);
template void Parser::DeserializeScopeChain(
LocalIsolate* isolate, ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
Scope::DeserializationMode mode);
namespace {
void MaybeProcessSourceRanges(ParseInfo* parse_info, Expression* root,
@ -2559,44 +2569,38 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
const bool is_top_level = AllowsLazyParsingWithoutUnresolvedVariables();
const bool is_eager_top_level_function = !is_lazy && is_top_level;
const bool is_lazy_top_level_function = is_lazy && is_top_level;
const bool is_lazy_inner_function = is_lazy && !is_top_level;
RCS_SCOPE(runtime_call_stats_, RuntimeCallCounterId::kParseFunctionLiteral,
RuntimeCallStats::kThreadSpecific);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
// Determine whether we can still lazy parse the inner function.
// The preconditions are:
// - Lazy compilation has to be enabled.
// - Neither V8 natives nor native function declarations can be allowed,
// since parsing one would retroactively force the function to be
// eagerly compiled.
// - The invoker of this parser can't depend on the AST being eagerly
// built (either because the function is about to be compiled, or
// because the AST is going to be inspected for some reason).
// - Because of the above, we can't be attempting to parse a
// FunctionExpression; even without enclosing parentheses it might be
// immediately invoked.
// - The function literal shouldn't be hinted to eagerly compile.
// Determine whether we can lazy parse the inner function. Lazy compilation
// has to be enabled, which is either forced by overall parse flags or via a
// ParsingModeScope.
const bool can_preparse = parse_lazily();
// Inner functions will be parsed using a temporary Zone. After parsing, we
// will migrate unresolved variable into a Scope in the main Zone.
const bool should_preparse_inner = parse_lazily() && is_lazy_inner_function;
// If parallel compile tasks are enabled, and the function is an eager
// top level function, then we can pre-parse the function and parse / compile
// in a parallel task on a worker thread.
bool should_post_parallel_task =
parse_lazily() && is_eager_top_level_function &&
FLAG_parallel_compile_tasks && info()->dispatcher() &&
// Determine whether we can post any parallel compile tasks. Preparsing must
// be possible, there has to be a dispatcher, and the character stream must be
// cloneable.
const bool can_post_parallel_task =
can_preparse && info()->dispatcher() &&
scanner()->stream()->can_be_cloned_for_parallel_access();
// This may be modified later to reflect preparsing decision taken
bool should_preparse = (parse_lazily() && is_lazy_top_level_function) ||
should_preparse_inner || should_post_parallel_task;
// If parallel compile tasks are enabled, enable parallel compile for the
// subset of functions as defined by flags.
bool should_post_parallel_task =
can_post_parallel_task &&
((is_eager_top_level_function &&
flags().post_parallel_compile_tasks_for_eager_toplevel()) ||
(is_lazy && flags().post_parallel_compile_tasks_for_lazy()));
// Determine whether we should lazy parse the inner function. This will be
// when either the function is lazy by inspection, or when we force it to be
// preparsed now so that we can then post a parallel full parse & compile task
// for it.
const bool should_preparse =
can_preparse && (is_lazy || should_post_parallel_task);
ScopedPtrList<Statement> body(pointer_buffer());
int expected_property_count = 0;
@ -2607,8 +2611,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int function_literal_id = GetNextFunctionLiteralId();
ProducedPreparseData* produced_preparse_data = nullptr;
// This Scope lives in the main zone. We'll migrate data into that zone later.
// Inner functions will be parsed using a temporary Zone. After parsing, we
// will migrate unresolved variable into a Scope in the main Zone.
Zone* parse_zone = should_preparse ? &preparser_zone_ : zone();
// This Scope lives in the main zone. We'll migrate data into that zone later.
DeclarationScope* scope = NewFunctionScope(kind, parse_zone);
SetLanguageMode(scope, language_mode);
#ifdef DEBUG
@ -3308,6 +3314,14 @@ void Parser::ParseOnBackground(LocalIsolate* isolate, ParseInfo* info,
DCHECK_EQ(function_literal_id, kFunctionLiteralIdTopLevel);
result = DoParseProgram(/* isolate = */ nullptr, info);
} else {
base::Optional<ClassScope::HeritageParsingScope> heritage;
if (V8_UNLIKELY(flags().private_name_lookup_skips_outer_class() &&
original_scope_->is_class_scope())) {
// If the function skips the outer class and the outer scope is a class,
// the function is in heritage position. Otherwise the function scope's
// skip bit will be correctly inherited from the outer scope.
heritage.emplace(original_scope_->AsClassScope());
}
result = DoParseFunction(/* isolate = */ nullptr, info, start_position,
end_position, function_literal_id,
info->function_name());

View File

@ -155,7 +155,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// This only deserializes the scope chain, but doesn't connect the scopes to
// their corresponding scope infos. Therefore, looking up variables in the
// deserialized scopes is not possible.
void DeserializeScopeChain(Isolate* isolate, ParseInfo* info,
template <typename IsolateT>
void DeserializeScopeChain(IsolateT* isolate, ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
Scope::DeserializationMode mode =
Scope::DeserializationMode::kScopesOnly);

View File

@ -12,6 +12,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/pending-optimization-table.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug-evaluate.h"
#include "src/deoptimizer/deoptimizer.h"
@ -575,12 +576,20 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
SharedFunctionInfo sfi = function->shared();
if (sfi.abstract_code(isolate).kind() != CodeKind::INTERPRETED_FUNCTION &&
sfi.abstract_code(isolate).kind() != CodeKind::BUILTIN) {
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
if (sfi->abstract_code(isolate).kind() != CodeKind::INTERPRETED_FUNCTION &&
sfi->abstract_code(isolate).kind() != CodeKind::BUILTIN) {
return CrashUnlessFuzzing(isolate);
}
sfi.DisableOptimization(BailoutReason::kNeverOptimize);
// Make sure to finish compilation if there is a parallel lazy compilation in
// progress, to make sure that the compilation finalization doesn't clobber
// the SharedFunctionInfo's disable_optimization field.
if (isolate->lazy_compile_dispatcher() &&
isolate->lazy_compile_dispatcher()->IsEnqueued(sfi)) {
isolate->lazy_compile_dispatcher()->FinishNow(sfi);
}
sfi->DisableOptimization(BailoutReason::kNeverOptimize);
return ReadOnlyRoots(isolate).undefined_value();
}

View File

@ -1853,7 +1853,8 @@ class BackgroundCompileJob final : public JobTask {
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out, int compilation_id) {
Handle<FixedArray>* export_wrappers_out, int compilation_id,
v8::metrics::Recorder::ContextId context_id) {
const WasmModule* wasm_module = module.get();
WasmEngine* engine = GetWasmEngine();
base::OwnedVector<uint8_t> wire_bytes_copy =
@ -1890,8 +1891,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
// Sync compilation is user blocking, so we increase the priority.
native_module->compilation_state()->SetHighPriority();
v8::metrics::Recorder::ContextId context_id =
isolate->GetOrRegisterRecorderContextId(isolate->native_context());
CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module,
export_wrappers_out);
bool cache_hit = !engine->UpdateNativeModuleCache(thrower->error(),

View File

@ -13,6 +13,7 @@
#include <functional>
#include <memory>
#include "include/v8-metrics.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/logging/metrics.h"
@ -52,7 +53,8 @@ V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out, int compilation_id);
Handle<FixedArray>* export_wrappers_out, int compilation_id,
v8::metrics::Recorder::ContextId context_id);
void RecompileNativeModule(NativeModule* native_module,
TieringState new_tiering_state);

View File

@ -13,6 +13,7 @@
#include "src/execution/v8threads.h"
#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/logging/metrics.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
#include "src/objects/managed-inl.h"
@ -490,10 +491,13 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
ModuleOrigin origin = language_mode == LanguageMode::kSloppy
? kAsmJsSloppyOrigin
: kAsmJsStrictOrigin;
// TODO(leszeks): If we want asm.js in UKM, we should figure out a way to pass
// the context id in here.
v8::metrics::Recorder::ContextId context_id =
v8::metrics::Recorder::ContextId::Empty();
ModuleResult result = DecodeWasmModule(
WasmFeatures::ForAsmjs(), bytes.start(), bytes.end(), false, origin,
isolate->counters(), isolate->metrics_recorder(),
isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
isolate->counters(), isolate->metrics_recorder(), context_id,
DecodingMethod::kSync, allocator());
if (result.failed()) {
// This happens once in a while when we have missed some limit check
@ -510,7 +514,7 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Handle<FixedArray> export_wrappers;
std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
isolate, WasmFeatures::ForAsmjs(), thrower, std::move(result).value(),
bytes, &export_wrappers, compilation_id);
bytes, &export_wrappers, compilation_id, context_id);
if (!native_module) return {};
return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
@ -534,11 +538,12 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
const ModuleWireBytes& bytes) {
int compilation_id = next_compilation_id_.fetch_add(1);
TRACE_EVENT1("v8.wasm", "wasm.SyncCompile", "id", compilation_id);
ModuleResult result = DecodeWasmModule(
enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
v8::metrics::Recorder::ContextId context_id =
isolate->GetOrRegisterRecorderContextId(isolate->native_context());
ModuleResult result =
DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
isolate->counters(), isolate->metrics_recorder(),
isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
DecodingMethod::kSync, allocator());
context_id, DecodingMethod::kSync, allocator());
if (result.failed()) {
thrower->CompileFailed(result.error());
return {};
@ -549,7 +554,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
Handle<FixedArray> export_wrappers;
std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
isolate, enabled, thrower, std::move(result).value(), bytes,
&export_wrappers, compilation_id);
&export_wrappers, compilation_id, context_id);
if (!native_module) return {};
#ifdef DEBUG

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --compiler-dispatcher --parallel-compile-tasks --use-external-strings
// Flags: --compiler-dispatcher --parallel-compile-tasks-for-eager-toplevel --use-external-strings
(function(a) {
assertEquals(a, "IIFE");

View File

@ -79,6 +79,7 @@ class LazyCompileDispatcherTest : public TestWithNativeContext {
test::OuterParseInfoForShared(isolate, shared, &state);
if (dispatcher->IsEnqueued(shared)) return;
dispatcher->Enqueue(isolate->main_thread_local_isolate(), shared,
outer_parse_info->state(),
outer_parse_info->character_stream()->Clone(), nullptr);
}
};

View File

@ -80,7 +80,8 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
shared->function_literal_id(), nullptr);
return new BackgroundCompileTask(
isolate, shared, outer_parse_info->character_stream()->Clone(),
isolate, shared, outer_parse_info->state(),
outer_parse_info->character_stream()->Clone(),
function_literal->produced_preparse_data(),
isolate->counters()->worker_thread_runtime_call_stats(),
isolate->counters()->compile_function_on_background(), FLAG_stack_size);

View File

@ -137,7 +137,7 @@ class MemoryProtectionTest : public TestWithNativeContext {
std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
isolate(), WasmFeatures::All(), &thrower, std::move(result).value(),
ModuleWireBytes{base::ArrayVector(module_bytes)}, &export_wrappers,
kNoCompilationId);
kNoCompilationId, v8::metrics::Recorder::ContextId::Empty());
CHECK(!thrower.error());
CHECK_NOT_NULL(native_module);

View File

@ -94,7 +94,8 @@ INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
"lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*",
"--stress-concurrent-inlining"]
+ INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"],
"predictable": ["--parallel-compile-tasks",
"predictable": ["--parallel-compile-tasks-for-eager-toplevel",
"--parallel-compile-tasks-for-lazy",
"--concurrent-recompilation",
"--stress-concurrent-allocation",
"--stress-concurrent-inlining"],
@ -111,7 +112,8 @@ INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--concurrent-recompilation": ["--predictable", "--assert-types"],
"--parallel-compile-tasks": ["--predictable"],
"--parallel-compile-tasks-for-eager-toplevel": ["--predictable"],
"--parallel-compile-tasks-for-lazy": ["--predictable"],
"--gc-interval=*": ["--gc-interval=*"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation":