Revert "Reland "[wasm] Compile JS to WASM wrappers asynchronously""

This reverts commit 801930f5f5.

Reason for revert: Blink layout test failures; see https://bugs.chromium.org/p/v8/issues/detail?id=9554

Original change's description:
> Reland "[wasm] Compile JS to WASM wrappers asynchronously"
> 
> Original CL had an issue with builtins being accessed through the
> isolate after the isolate died. See:
> https://ci.chromium.org/p/v8/builders/try.triggered/v8_win64_rel_ng_triggered/b8907837534672203296
> 
> Initial upload is the original CL and the following patch sets will
> attempt to fix it.
> 
> Original CL:
> 
> > [wasm] Compile JS to WASM wrappers asynchronously
> >
> > R=mstarzinger@chromium.org, ahaas@chromium.org
> >
> > Bug: v8:9231
> > Change-Id: I9e18073bbe25bf8c9c5f9ace102316e6209d0459
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1669699
> > Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
> > Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> > Reviewed-by: Andreas Haas <ahaas@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#62672}
> 
> R=mstarzinger@chromium.org, ahaas@chromium.org
> 
> Bug: v8:9231
> Change-Id: I1b01d5d2b9f728d6f6a90fe9b642f5ba3bf686eb
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1708485
> Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#62918}

TBR=mstarzinger@chromium.org,thibaudm@chromium.org

Change-Id: I3a6829692614c44bacb764ef02723e61a3d61763
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9231
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1719231
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62927}
Zhi An Ng authored 2019-07-25 22:47:33 +00:00, committed by Commit Bot
parent bf7284b90c
commit 11e27b5e6a
17 changed files with 118 additions and 312 deletions
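
For context, this revert moves JS-to-wasm wrapper compilation back to the synchronous, main-thread path: each export wrapper is prepared, executed, and finalized while the isolate is known to be alive, instead of being executed on background compile tasks. Below is a minimal sketch of that restored flow, condensed from the function-compiler and module-compiler hunks later in this diff; the helper name is illustrative, the real entry point is the static JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper shown below.

// Illustrative sketch, not part of the diff: the synchronous wrapper
// compilation flow restored by this revert. All three phases run on the
// main thread with a live isolate, so builtins are never accessed after
// isolate teardown.
Handle<Code> CompileWrapperOnMainThread(Isolate* isolate, FunctionSig* sig,
                                        bool is_import) {
  JSToWasmWrapperCompilationUnit unit(isolate, sig, is_import);
  unit.Prepare(isolate);          // main-thread setup that needs the isolate
  unit.Execute();                 // runs the compilation pipeline
  return unit.Finalize(isolate);  // allocates the wrapper Code object
}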

View File

@ -289,7 +289,13 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
CallBuiltin(builtin_index);
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(ip, cond);
return;
}
@ -317,18 +323,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(ip, cond);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@ -1838,8 +1832,6 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (options().inline_offheap_trampolines) {
CallBuiltin(Builtins::kDoubleToI);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}

View File

@ -304,7 +304,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index, Condition cond = al);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -1900,7 +1900,14 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
}
}
@ -1944,19 +1951,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@ -2380,8 +2374,6 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
// DoubleToI preserves any registers it needs to clobber.
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (options().inline_offheap_trampolines) {
CallBuiltin(Builtins::kDoubleToI);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}

View File

@ -896,7 +896,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -1875,7 +1875,11 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
call(entry, RelocInfo::OFF_HEAP_TARGET);
return;
}
}
@ -1903,16 +1907,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
call(builtin_index);
}
void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
call(entry, RelocInfo::OFF_HEAP_TARGET);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating

View File

@ -91,7 +91,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -1594,7 +1594,12 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
return;
}
}
@ -1629,17 +1634,6 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(EntryFromBuiltinIndexAsOperand(builtin_index));
}
void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating

View File

@ -344,7 +344,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -91,7 +91,6 @@ CodeGenerator::CodeGenerator(
code_kind == Code::WASM_TO_CAPI_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION ||
code_kind == Code::WASM_INTERPRETER_ENTRY ||
code_kind == Code::JS_TO_WASM_FUNCTION ||
(Builtins::IsBuiltinId(builtin_index) &&
Builtins::IsWasmRuntimeStub(builtin_index))) {
tasm_.set_abort_hard(true);

View File

@ -256,8 +256,6 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ wasm_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (tasm()->options().inline_offheap_trampolines) {
__ CallBuiltin(Builtins::kDoubleToI);
} else {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
}

View File

@ -210,10 +210,6 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ near_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (tasm()->options().inline_offheap_trampolines) {
// With embedded builtins we do not need the isolate here. This allows
// the call to be generated asynchronously.
__ CallBuiltin(Builtins::kDoubleToI);
} else {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
}

View File

@ -110,9 +110,6 @@ class PipelineData {
may_have_unverifiable_graph_(false),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
roots_relative_addressing_enabled_(
!isolate->serializer_enabled() &&
!isolate->IsGeneratingEmbeddedBuiltins()),
graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_zone_(graph_zone_scope_.zone()),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
@ -176,12 +173,12 @@ class PipelineData {
// For CodeStubAssembler and machine graph testing entry point.
PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
Isolate* isolate, AccountingAllocator* allocator, Graph* graph,
Schedule* schedule, SourcePositionTable* source_positions,
Isolate* isolate, Graph* graph, Schedule* schedule,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
const AssemblerOptions& assembler_options)
: isolate_(isolate),
allocator_(allocator),
allocator_(isolate->allocator()),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
@ -454,10 +451,6 @@ class PipelineData {
const char* debug_name() const { return debug_name_.get(); }
bool roots_relative_addressing_enabled() {
return roots_relative_addressing_enabled_;
}
private:
Isolate* const isolate_;
wasm::WasmEngine* const wasm_engine_ = nullptr;
@ -475,7 +468,6 @@ class PipelineData {
CodeGenerator* code_generator_ = nullptr;
Typer* typer_ = nullptr;
Typer::Flags typer_flags_ = Typer::kNoFlags;
bool roots_relative_addressing_enabled_ = false;
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
@ -1066,19 +1058,16 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
: OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
"TurboFan", State::kReadyToExecute),
"TurboFan"),
debug_name_(std::move(debug_name)),
info_(CStrVector(debug_name_.get()), graph->zone(), kind),
call_descriptor_(call_descriptor),
zone_stats_(isolate->wasm_engine()->allocator()),
zone_stats_(isolate->allocator()),
zone_(std::move(zone)),
graph_(graph),
data_(&zone_stats_, &info_, isolate,
isolate->wasm_engine()->allocator(), graph_, nullptr,
source_positions, new (zone_.get()) NodeOriginTable(graph_),
nullptr, options),
pipeline_(&data_),
wasm_engine_(isolate->wasm_engine()) {}
data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions,
new (zone_.get()) NodeOriginTable(graph_), nullptr, options),
pipeline_(&data_) {}
~WasmHeapStubCompilationJob() = default;
@ -1096,7 +1085,6 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
Graph* graph_;
PipelineData data_;
PipelineImpl pipeline_;
wasm::WasmEngine* wasm_engine_;
DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
};
@ -1117,14 +1105,10 @@ Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
Isolate* isolate) {
return CompilationJob::SUCCEEDED;
}
CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(
&info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
&info_, isolate->GetTurboStatistics(), &zone_stats_));
pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
}
if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
@ -1146,6 +1130,10 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
<< "\", \"source\":\"\",\n\"phases\":[";
}
pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
return CompilationJob::SUCCEEDED;
}
CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
pipeline_.ComputeScheduledGraph();
if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
return CompilationJob::SUCCEEDED;
@ -1868,9 +1856,10 @@ struct InstructionSelectionPhase {
FLAG_turbo_instruction_scheduling
? InstructionSelector::kEnableScheduling
: InstructionSelector::kDisableScheduling,
data->roots_relative_addressing_enabled()
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
!data->isolate() || data->isolate()->serializer_enabled() ||
data->isolate()->IsGeneratingEmbeddedBuiltins()
? InstructionSelector::kDisableRootsRelativeAddressing
: InstructionSelector::kEnableRootsRelativeAddressing,
data->info()->GetPoisoningMitigationLevel(),
data->info()->trace_turbo_json_enabled()
? InstructionSelector::kEnableTraceTurboJson
@ -2367,8 +2356,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
JumpOptimizationInfo jump_opt;
bool should_optimize_jumps =
isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
nullptr, source_positions, &node_origins,
PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
source_positions, &node_origins,
should_optimize_jumps ? &jump_opt : nullptr, options);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
@ -2413,10 +2402,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
// First run code generation on a copy of the pipeline, in order to be able to
// repeat it for jump optimization. The first run has to happen on a temporary
// pipeline to avoid deletion of zones on the main pipeline.
PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(),
data.graph(), data.schedule(),
data.source_positions(), data.node_origins(),
data.jump_optimization_info(), options);
PipelineData second_data(&zone_stats, &info, isolate, data.graph(),
data.schedule(), data.source_positions(),
data.node_origins(), data.jump_optimization_info(),
options);
second_data.set_verify_graph(FLAG_verify_csa);
PipelineImpl second_pipeline(&second_data);
second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
@ -2562,8 +2551,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph);
PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
schedule, nullptr, node_positions, nullptr, options);
PipelineData data(&zone_stats, info, isolate, graph, schedule, nullptr,
node_positions, nullptr, options);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(

View File

@ -6087,7 +6087,7 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
// Create the Graph.
//----------------------------------------------------------------------------
std::unique_ptr<Zone> zone =
base::make_unique<Zone>(isolate->wasm_engine()->allocator(), ZONE_NAME);
base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
@ -6550,7 +6550,8 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
std::move(debug_name), AssemblerOptions::Default(isolate)));
if (job->ExecuteJob() == CompilationJob::FAILED ||
if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
job->ExecuteJob() == CompilationJob::FAILED ||
job->FinalizeJob(isolate) == CompilationJob::FAILED) {
return {};
}

View File

@ -265,12 +265,15 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(Isolate* isolate,
FunctionSig* sig,
bool is_import)
: is_import_(is_import),
sig_(sig),
job_(compiler::NewJSToWasmCompilationJob(isolate, sig, is_import)) {}
: job_(compiler::NewJSToWasmCompilationJob(isolate, sig, is_import)) {}
JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
void JSToWasmWrapperCompilationUnit::Prepare(Isolate* isolate) {
CompilationJob::Status status = job_->PrepareJob(isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
}
void JSToWasmWrapperCompilationUnit::Execute() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
DCHECK_EQ(job_->state(), CompilationJob::State::kReadyToExecute);
@ -294,6 +297,7 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
Isolate* isolate, FunctionSig* sig, bool is_import) {
// Run the compilation unit synchronously.
JSToWasmWrapperCompilationUnit unit(isolate, sig, is_import);
unit.Prepare(isolate);
unit.Execute();
return unit.Finalize(isolate);
}

View File

@ -112,19 +112,15 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
bool is_import);
~JSToWasmWrapperCompilationUnit();
void Prepare(Isolate* isolate);
void Execute();
Handle<Code> Finalize(Isolate* isolate);
bool is_import() const { return is_import_; }
FunctionSig* sig() const { return sig_; }
// Run a compilation unit synchronously.
static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
bool is_import);
private:
bool is_import_;
FunctionSig* sig_;
std::unique_ptr<OptimizedCompilationJob> job_;
};

View File

@ -381,7 +381,7 @@ class CompilationStateImpl {
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
// is invoked which triggers background compilation.
void InitializeCompilationProgress(bool lazy_module, int num_wrappers);
void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@ -389,24 +389,13 @@ class CompilationStateImpl {
void AddCallback(CompilationState::callback_t);
// Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits(
Vector<WasmCompilationUnit> baseline_units,
Vector<WasmCompilationUnit> top_tier_units,
Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units);
void AddCompilationUnits(Vector<WasmCompilationUnit> baseline_units,
Vector<WasmCompilationUnit> top_tier_units);
void AddTopTierCompilationUnit(WasmCompilationUnit);
base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
int task_id, CompileBaselineOnly baseline_only);
std::shared_ptr<JSToWasmWrapperCompilationUnit>
GetNextJSToWasmWrapperCompilationUnit();
void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers);
void OnFinishedUnits(Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
void TriggerCallbacks(bool completes_baseline_compilation,
bool completes_top_tier_compilation);
void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
void UpdateDetectedFeatures(const WasmFeatures& detected);
@ -494,13 +483,6 @@ class CompilationStateImpl {
// tasks a fair chance to utilize the worker threads on a regular basis.
std::atomic<double> next_compilation_deadline_{0};
// Index of the next wrapper to compile in {js_to_wasm_wrapper_units_}.
std::atomic<int> js_to_wasm_wrapper_id_{0};
// Wrapper compilation units are stored in shared_ptrs so that they are kept
// alive by the tasks even if the NativeModule dies.
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units_;
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
@ -729,11 +711,6 @@ class CompilationUnitBuilder {
}
}
void AddJSToWasmWrapperUnit(
std::shared_ptr<JSToWasmWrapperCompilationUnit> unit) {
js_to_wasm_wrapper_units_.emplace_back(std::move(unit));
}
void AddTopTierUnit(int func_index) {
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
@ -752,13 +729,9 @@ class CompilationUnitBuilder {
}
bool Commit() {
if (baseline_units_.empty() && tiering_units_.empty() &&
js_to_wasm_wrapper_units_.empty()) {
return false;
}
compilation_state()->AddCompilationUnits(
VectorOf(baseline_units_), VectorOf(tiering_units_),
VectorOf(js_to_wasm_wrapper_units_));
if (baseline_units_.empty() && tiering_units_.empty()) return false;
compilation_state()->AddCompilationUnits(VectorOf(baseline_units_),
VectorOf(tiering_units_));
Clear();
return true;
}
@ -766,7 +739,6 @@ class CompilationUnitBuilder {
void Clear() {
baseline_units_.clear();
tiering_units_.clear();
js_to_wasm_wrapper_units_.clear();
}
private:
@ -778,8 +750,6 @@ class CompilationUnitBuilder {
const ExecutionTier default_tier_;
std::vector<WasmCompilationUnit> baseline_units_;
std::vector<WasmCompilationUnit> tiering_units_;
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units_;
};
void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes,
@ -939,33 +909,6 @@ void RecordStats(const Code code, Counters* counters) {
constexpr int kMainThreadTaskId = -1;
bool ExecuteJSToWasmWrapperCompilationUnits(
const std::shared_ptr<BackgroundCompileToken>& token) {
std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
int num_processed_wrappers = 0;
do {
// TODO(thibaudm): Reschedule the compilation task if it takes too long, so
// that the background thread is not blocked.
{
BackgroundCompileScope compile_scope(token);
if (compile_scope.cancelled()) return false;
wrapper_unit = compile_scope.compilation_state()
->GetNextJSToWasmWrapperCompilationUnit();
}
if (wrapper_unit) {
wrapper_unit->Execute();
++num_processed_wrappers;
}
} while (wrapper_unit);
{
BackgroundCompileScope compile_scope(token);
if (compile_scope.cancelled()) return false;
compile_scope.compilation_state()->OnFinishedJSToWasmWrapperUnits(
num_processed_wrappers);
}
return true;
}
// Run by the main thread and background tasks to take part in compilation.
// Returns whether any units were executed.
bool ExecuteCompilationUnits(
@ -974,13 +917,6 @@ bool ExecuteCompilationUnits(
TRACE_COMPILE("Compiling (task %d)...\n", task_id);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
// Execute JS to WASM wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
// triggered.
if (!ExecuteJSToWasmWrapperCompilationUnits(token)) {
return false;
}
const bool is_foreground = task_id == kMainThreadTaskId;
// The main thread uses task id 0, which might collide with one of the
// background tasks. This is fine, as it will only cause some contention on
@ -1113,32 +1049,6 @@ bool ExecuteCompilationUnits(
return true;
}
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
// Returns the number of units added.
int AddExportWrapperUnits(Isolate* isolate, NativeModule* native_module,
CompilationUnitBuilder* builder) {
// Disable asynchronous wrapper compilation when builtins are not embedded,
// otherwise the isolate might be used after tear down to access builtins.
#ifdef V8_EMBEDDED_BUILTINS
std::unordered_set<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>> keys;
for (auto exp : native_module->module()->export_table) {
if (exp.kind != kExternalFunction) continue;
auto& function = native_module->module()->functions[exp.index];
JSToWasmWrapperKey key(function.imported, *function.sig);
if (keys.insert(key).second) {
auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
isolate, function.sig, function.imported);
builder->AddJSToWasmWrapperUnit(std::move(unit));
}
}
return static_cast<int>(keys.size());
#else
return 0;
#endif
}
// Returns the number of units added.
int AddImportWrapperUnits(NativeModule* native_module,
CompilationUnitBuilder* builder) {
@ -1164,7 +1074,7 @@ int AddImportWrapperUnits(NativeModule* native_module,
return static_cast<int>(keys.size());
}
void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
void InitializeCompilationUnits(NativeModule* native_module) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
const bool lazy_module = IsLazyModule(native_module->module());
@ -1188,10 +1098,8 @@ void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
}
}
int num_import_wrappers = AddImportWrapperUnits(native_module, &builder);
int num_export_wrappers =
AddExportWrapperUnits(isolate, native_module, &builder);
compilation_state->InitializeCompilationProgress(
lazy_module, num_import_wrappers + num_export_wrappers);
compilation_state->InitializeCompilationProgress(lazy_module,
num_import_wrappers);
builder.Commit();
}
@ -1293,7 +1201,7 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
}
// Initialize the compilation units and kick off background compile tasks.
InitializeCompilationUnits(isolate, native_module);
InitializeCompilationUnits(native_module);
// If tiering is disabled, the main thread can execute any unit (all of them
// are part of initial compilation). Otherwise, just execute baseline units.
@ -1379,17 +1287,12 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
if (thrower->error()) return {};
// Compile JS->wasm wrappers for exported functions.
int num_wrappers = MaxNumExportWrappers(native_module->module());
*export_wrappers_out =
isolate->factory()->NewFixedArray(num_wrappers, AllocationType::kOld);
#ifdef V8_EMBEDDED_BUILTINS
Impl(native_module->compilation_state())
->FinalizeJSToWasmWrappers(isolate, native_module->module(),
*export_wrappers_out);
#else
CompileJsToWasmWrappers(isolate, native_module->module(),
*export_wrappers_out);
#endif
// Log the code within the generated module for profiling.
native_module->LogWasmCodes(isolate);
@ -1558,23 +1461,17 @@ void AsyncCompileJob::FinishCompile() {
}
isolate_->debug()->OnAfterCompile(script);
// We can only update the feature counts once the entire compile is done.
auto compilation_state =
Impl(module_object_->native_module()->compilation_state());
#ifndef V8_EMBEDDED_BUILTINS
CompileJsToWasmWrappers(isolate_, module_object_->native_module()->module(),
handle(module_object_->export_wrappers(), isolate_));
#else
compilation_state->PublishDetectedFeatures(isolate_);
// TODO(bbudge) Allow deserialization without wrapper compilation, so we can
// just compile wrappers here.
if (!is_after_deserialization) {
Handle<FixedArray> export_wrappers =
handle(module_object_->export_wrappers(), isolate_);
compilation_state->FinalizeJSToWasmWrappers(
isolate_, module_object_->module(), export_wrappers);
// TODO(wasm): compiling wrappers should be made async.
CompileWrappers();
}
#endif
// We can only update the feature counts once the entire compile is done.
compilation_state->PublishDetectedFeatures(isolate_);
FinishModule();
}
@ -1894,7 +1791,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// then DoAsync would do the same as NextStep already.
// Add compilation units and kick off compilation.
InitializeCompilationUnits(job->isolate(), job->native_module_.get());
InitializeCompilationUnits(job->native_module_.get());
}
}
};
@ -1955,8 +1852,17 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
}
};
void AsyncCompileJob::CompileWrappers() {
// TODO(wasm): Compile all wrappers here, including the start function wrapper
// and the wrappers for the function table elements.
TRACE_COMPILE("(5) Compile wrappers...\n");
// Compile JS->wasm wrappers for exported functions.
CompileJsToWasmWrappers(isolate_, module_object_->native_module()->module(),
handle(module_object_->export_wrappers(), isolate_));
}
void AsyncCompileJob::FinishModule() {
TRACE_COMPILE("(4) Finish module...\n");
TRACE_COMPILE("(6) Finish module...\n");
AsyncCompileSucceeded(module_object_);
isolate_->wasm_engine()->RemoveCompileJob(this);
}
@ -2066,10 +1972,8 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
int num_import_wrappers =
AddImportWrapperUnits(native_module, compilation_unit_builder_.get());
int num_export_wrappers = AddExportWrapperUnits(
job_->isolate_, native_module, compilation_unit_builder_.get());
compilation_state->InitializeCompilationProgress(
lazy_module, num_import_wrappers + num_export_wrappers);
compilation_state->InitializeCompilationProgress(lazy_module,
num_import_wrappers);
return true;
}
@ -2240,8 +2144,8 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
int num_wrappers) {
void CompilationStateImpl::InitializeCompilationProgress(
bool lazy_module, int num_import_wrappers) {
DCHECK(!failed());
auto enabled_features = native_module_->enabled_features();
auto* module = native_module_->module();
@ -2285,7 +2189,7 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
DCHECK_IMPLIES(lazy_module, outstanding_top_tier_functions_ == 0);
DCHECK_LE(0, outstanding_baseline_units_);
DCHECK_LE(outstanding_baseline_units_, outstanding_top_tier_functions_);
outstanding_baseline_units_ += num_wrappers;
outstanding_baseline_units_ += num_import_wrappers;
// Trigger callbacks if module needs no baseline or top tier compilation. This
// can be the case for an empty or fully lazy module.
@ -2310,50 +2214,15 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
void CompilationStateImpl::AddCompilationUnits(
Vector<WasmCompilationUnit> baseline_units,
Vector<WasmCompilationUnit> top_tier_units,
Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units) {
if (!baseline_units.empty() || !top_tier_units.empty()) {
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
}
js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
js_to_wasm_wrapper_units.begin(),
js_to_wasm_wrapper_units.end());
Vector<WasmCompilationUnit> top_tier_units) {
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
RestartBackgroundTasks();
}
void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
AddCompilationUnits({}, {&unit, 1}, {});
}
std::shared_ptr<JSToWasmWrapperCompilationUnit>
CompilationStateImpl::GetNextJSToWasmWrapperCompilationUnit() {
int wrapper_id =
js_to_wasm_wrapper_id_.fetch_add(1, std::memory_order_relaxed);
if (wrapper_id < static_cast<int>(js_to_wasm_wrapper_units_.size())) {
return js_to_wasm_wrapper_units_[wrapper_id];
}
return nullptr;
}
void CompilationStateImpl::FinalizeJSToWasmWrappers(
Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers) {
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
// optimization we keep the code space unlocked to avoid repeated unlocking
// because many such wrapper are allocated in sequence below.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"FinalizeJSToWasmWrappers");
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
Handle<Code> code = unit->Finalize(isolate);
int wrapper_index =
GetExportWrapperIndex(module, unit->sig(), unit->is_import());
export_wrappers->set(wrapper_index, *code);
RecordStats(*code, isolate->counters());
}
AddCompilationUnits({}, {&unit, 1});
}
base::Optional<WasmCompilationUnit>
@ -2443,38 +2312,25 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
DCHECK_LE(0, outstanding_baseline_units_);
}
TriggerCallbacks(completes_baseline_compilation,
completes_top_tier_compilation);
}
}
void CompilationStateImpl::OnFinishedJSToWasmWrapperUnits(int num) {
if (num == 0) return;
base::MutexGuard guard(&callbacks_mutex_);
outstanding_baseline_units_ -= num;
bool completes_baseline_compilation = outstanding_baseline_units_ == 0;
TriggerCallbacks(completes_baseline_compilation, false);
}
void CompilationStateImpl::TriggerCallbacks(
bool completes_baseline_compilation, bool completes_top_tier_compilation) {
if (completes_baseline_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
// Trigger callbacks.
if (completes_baseline_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
if (outstanding_top_tier_functions_ == 0) {
completes_top_tier_compilation = true;
}
}
if (outstanding_top_tier_functions_ == 0) {
completes_top_tier_compilation = true;
if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
}
// Clear the callbacks because no more events will be delivered.
callbacks_.clear();
}
}
if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
}
// Clear the callbacks because no more events will be delivered.
callbacks_.clear();
}
}
void CompilationStateImpl::OnBackgroundTaskStopped(
@ -2522,11 +2378,6 @@ void CompilationStateImpl::RestartBackgroundTasks() {
if (failed()) return;
size_t max_num_restart = compilation_unit_queues_.GetTotalSize();
if (js_to_wasm_wrapper_id_ <
static_cast<int>(js_to_wasm_wrapper_units_.size())) {
max_num_restart +=
js_to_wasm_wrapper_units_.size() - js_to_wasm_wrapper_id_;
}
while (!available_task_ids_.empty() && max_num_restart-- > 0) {
int task_id = available_task_ids_.back();
@ -2566,6 +2417,7 @@ void CompilationStateImpl::SetError() {
}
namespace {
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
using JSToWasmWrapperQueue =
WrapperQueue<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>>;
using JSToWasmWrapperUnitMap =
@ -2608,6 +2460,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
if (queue.insert(key)) {
auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
isolate, function.sig, function.imported);
unit->Prepare(isolate);
compilation_units.emplace(key, std::move(unit));
}
}
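
The module-compiler hunks above delete the background work-distribution machinery for wrapper units (js_to_wasm_wrapper_units_, GetNextJSToWasmWrapperCompilationUnit, OnFinishedJSToWasmWrapperUnits). For readers skimming the diff, here is a condensed sketch of the pattern being removed, with simplified names; as the removed comment notes, the shared_ptr keeps a claimed unit alive even if the NativeModule dies.

// Condensed, illustrative sketch of the removed work distribution: background
// tasks claim wrapper units through an atomic index, so each unit is executed
// exactly once and no lock is needed once the vector is populated.
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>> wrapper_units_;
std::atomic<int> next_wrapper_{0};

std::shared_ptr<JSToWasmWrapperCompilationUnit> GetNextWrapperUnit() {
  int id = next_wrapper_.fetch_add(1, std::memory_order_relaxed);
  if (id < static_cast<int>(wrapper_units_.size())) return wrapper_units_[id];
  return nullptr;  // all wrapper units have been claimed
}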

View File

@ -153,6 +153,8 @@ class AsyncCompileJob {
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
void CompileWrappers();
void FinishModule();
void StartForegroundTask();