Reland "[wasm] Compile JS to WASM wrappers asynchronously"

The reason for the revert should be fixed by this CL:
https://chromium-review.googlesource.com/c/v8/v8/+/1722554

The underlying problem was that the isolate was accessed from the
background compilation thread. Other known places where the isolate was
used have also been fixed in these CLs:
https://chromium-review.googlesource.com/c/v8/v8/+/1722555
https://chromium-review.googlesource.com/c/v8/v8/+/1722559
https://chromium-review.googlesource.com/c/v8/v8/+/1722556

Original CL:
> Reland "[wasm] Compile JS to WASM wrappers asynchronously"
>
> Original CL had an issue with builtins being accessed through the
> isolate after the isolate died. See:
> https://ci.chromium.org/p/v8/builders/try.triggered/v8_win64_rel_ng_triggered/b8907837534672203296
>
> Initial upload is the original CL and the following patch sets will
> attempt to fix it.
>
> Original CL:
>
> > [wasm] Compile JS to WASM wrappers asynchronously
> >
> > R=mstarzinger@chromium.org, ahaas@chromium.org
> >
> > Bug: v8:9231
> > Change-Id: I9e18073bbe25bf8c9c5f9ace102316e6209d0459
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1669699
> > Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
> > Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> > Reviewed-by: Andreas Haas <ahaas@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#62672}
>
> R=mstarzinger@chromium.org, ahaas@chromium.org
>
> Bug: v8:9231
> Change-Id: I1b01d5d2b9f728d6f6a90fe9b642f5ba3bf686eb
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1708485
> Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#62918}

R=mstarzinger@chromium.org

Bug: v8:9231, v8:9554
Change-Id: I40443e7228eb26d6669e826e96073b20fa038c15
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1725619
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62985}
Thibaud Michaud, 2019-07-30 11:26:11 +02:00 (committed by Commit Bot)
parent 743ce7726d
commit 924ab19bcc
18 changed files with 338 additions and 136 deletions
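
The gist of the change: a JS-to-Wasm wrapper compilation job no longer
touches the isolate while it runs. The job executes on a background thread
against engine-owned data, and only the final step that allocates the Code
object runs on the main thread. Below is a minimal standalone sketch of that
split; WrapperJob, MainThreadIsolate and CompiledWrapper are invented
placeholder types, not the real V8 classes.

#include <cstdio>
#include <memory>
#include <thread>

struct MainThreadIsolate {};            // may only be used on the main thread
struct CompiledWrapper { int id; };

class WrapperJob {
 public:
  explicit WrapperJob(int id) : id_(id) {}
  // Backgroundable: depends only on process/engine-wide data, not the isolate.
  void Execute() { std::printf("compiling wrapper %d off-thread\n", id_); }
  // Main thread only: the isolate is needed to allocate the code object.
  std::unique_ptr<CompiledWrapper> Finalize(MainThreadIsolate*) {
    return std::unique_ptr<CompiledWrapper>(new CompiledWrapper{id_});
  }
 private:
  int id_;
};

int main() {
  MainThreadIsolate isolate;
  WrapperJob job(0);
  std::thread worker([&] { job.Execute(); });  // no isolate captured
  worker.join();
  auto code = job.Finalize(&isolate);          // back on the main thread
  std::printf("finalized wrapper %d\n", code->id);
}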

View File

@ -289,13 +289,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(ip, cond);
CallBuiltin(builtin_index);
return;
}
@ -323,6 +317,18 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(ip, cond);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@ -1832,6 +1838,8 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (options().inline_offheap_trampolines) {
CallBuiltin(Builtins::kDoubleToI);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
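
The assembler hunks above factor the inline off-heap trampoline into a
CallBuiltin helper, so emitting a builtin call only consults the process-wide
embedded blob and never the isolate. A rough standalone model of that idea;
EmbeddedBlob and MiniAssembler are invented stand-ins, not the real
EmbeddedData/TurboAssembler API.

#include <cassert>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

// Stand-in for the embedded blob: maps builtin ids to off-heap entry points.
struct EmbeddedBlob {
  Address InstructionStartOfBuiltin(int builtin_index) const {
    assert(builtin_index >= 0);
    return 0x1000 + 0x100 * static_cast<Address>(builtin_index);
  }
};

struct MiniAssembler {
  EmbeddedBlob blob;  // process-wide, isolate-independent
  void Call(Address entry) { std::printf("call 0x%" PRIxPTR "\n", entry); }
  void CallBuiltin(int builtin_index) {
    // Mirrors the new helper: resolve the off-heap entry and call it directly.
    Call(blob.InstructionStartOfBuiltin(builtin_index));
  }
};

int main() {
  MiniAssembler masm;
  masm.CallBuiltin(7);  // some builtin id, e.g. a DoubleToI-like stub
}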

View File

@ -304,6 +304,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index, Condition cond = al);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -1900,14 +1900,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
CallBuiltin(builtin_index);
return;
}
}
@ -1951,6 +1944,19 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@ -2374,6 +2380,8 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
// DoubleToI preserves any registers it needs to clobber.
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (options().inline_offheap_trampolines) {
CallBuiltin(Builtins::kDoubleToI);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}

View File

@ -896,6 +896,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -1875,11 +1875,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
call(entry, RelocInfo::OFF_HEAP_TARGET);
CallBuiltin(builtin_index);
return;
}
}
@ -1907,6 +1903,16 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
call(builtin_index);
}
void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
call(entry, RelocInfo::OFF_HEAP_TARGET);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating

View File

@ -91,6 +91,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -1594,12 +1594,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
CallBuiltin(builtin_index);
return;
}
}
@ -1634,6 +1629,17 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(EntryFromBuiltinIndexAsOperand(builtin_index));
}
void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(FLAG_embedded_builtins);
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating

View File

@ -344,6 +344,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;

View File

@ -91,6 +91,7 @@ CodeGenerator::CodeGenerator(
code_kind == Code::WASM_TO_CAPI_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION ||
code_kind == Code::WASM_INTERPRETER_ENTRY ||
code_kind == Code::JS_TO_WASM_FUNCTION ||
(Builtins::IsBuiltinId(builtin_index) &&
Builtins::IsWasmRuntimeStub(builtin_index))) {
tasm_.set_abort_hard(true);

View File

@ -256,6 +256,8 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ wasm_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (tasm()->options().inline_offheap_trampolines) {
__ CallBuiltin(Builtins::kDoubleToI);
} else {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
}

View File

@ -210,6 +210,10 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
__ near_call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else if (tasm()->options().inline_offheap_trampolines) {
// With embedded builtins we do not need the isolate here. This allows
// the call to be generated asynchronously.
__ CallBuiltin(Builtins::kDoubleToI);
} else {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
}
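
The comment in the hunk above is the enabling observation: with embedded
builtins the DoubleToI call can be emitted without the isolate, so the code
generator picks between three call strategies depending on how the code is
produced. A simplified sketch of that decision; the enum and function names
are invented, only the shape of the branch mirrors the code above.

#include <cstdio>

enum class CallMode { kWasmRuntimeStub, kEmbeddedBuiltin, kHeapBuiltin };

CallMode ChooseDoubleToICall(bool is_wasm_stub_call,
                             bool inline_offheap_trampolines) {
  if (is_wasm_stub_call) return CallMode::kWasmRuntimeStub;  // patched later
  if (inline_offheap_trampolines) {
    return CallMode::kEmbeddedBuiltin;  // no isolate needed, backgroundable
  }
  return CallMode::kHeapBuiltin;  // needs BUILTIN_CODE(isolate, ...)
}

int main() {
  // Background wrapper compilation relies on the embedded-builtin case.
  std::printf("%d\n", static_cast<int>(ChooseDoubleToICall(false, true)));
}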

View File

@ -110,6 +110,9 @@ class PipelineData {
may_have_unverifiable_graph_(false),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
roots_relative_addressing_enabled_(
!isolate->serializer_enabled() &&
!isolate->IsGeneratingEmbeddedBuiltins()),
graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_zone_(graph_zone_scope_.zone()),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
@ -173,12 +176,12 @@ class PipelineData {
// For CodeStubAssembler and machine graph testing entry point.
PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
Isolate* isolate, Graph* graph, Schedule* schedule,
SourcePositionTable* source_positions,
Isolate* isolate, AccountingAllocator* allocator, Graph* graph,
Schedule* schedule, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
const AssemblerOptions& assembler_options)
: isolate_(isolate),
allocator_(isolate->allocator()),
allocator_(allocator),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
@ -451,6 +454,10 @@ class PipelineData {
const char* debug_name() const { return debug_name_.get(); }
bool roots_relative_addressing_enabled() {
return roots_relative_addressing_enabled_;
}
private:
Isolate* const isolate_;
wasm::WasmEngine* const wasm_engine_ = nullptr;
@ -468,6 +475,7 @@ class PipelineData {
CodeGenerator* code_generator_ = nullptr;
Typer* typer_ = nullptr;
Typer::Flags typer_flags_ = Typer::kNoFlags;
bool roots_relative_addressing_enabled_ = false;
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
@ -1046,7 +1054,8 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
public:
WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
WasmHeapStubCompilationJob(Isolate* isolate, wasm::WasmEngine* wasm_engine,
CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone, Graph* graph,
Code::Kind kind,
std::unique_ptr<char[]> debug_name,
@ -1055,16 +1064,19 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
// Note that the OptimizedCompilationInfo is not initialized at the time
// we pass it to the CompilationJob constructor, but it is not
// dereferenced there.
: OptimizedCompilationJob(&info_, "TurboFan"),
: OptimizedCompilationJob(&info_, "TurboFan",
CompilationJob::State::kReadyToExecute),
debug_name_(std::move(debug_name)),
info_(CStrVector(debug_name_.get()), graph->zone(), kind),
call_descriptor_(call_descriptor),
zone_stats_(zone->allocator()),
zone_(std::move(zone)),
graph_(graph),
data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions,
data_(&zone_stats_, &info_, isolate, wasm_engine->allocator(), graph_,
nullptr, source_positions,
new (zone_.get()) NodeOriginTable(graph_), nullptr, options),
pipeline_(&data_) {}
pipeline_(&data_),
wasm_engine_(wasm_engine) {}
~WasmHeapStubCompilationJob() = default;
@ -1082,30 +1094,33 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
Graph* graph_;
PipelineData data_;
PipelineImpl pipeline_;
wasm::WasmEngine* wasm_engine_;
DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
};
// static
std::unique_ptr<OptimizedCompilationJob>
Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone,
Graph* graph, Code::Kind kind,
std::unique_ptr<char[]> debug_name,
const AssemblerOptions& options,
SourcePositionTable* source_positions) {
Pipeline::NewWasmHeapStubCompilationJob(
Isolate* isolate, wasm::WasmEngine* wasm_engine,
CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
Code::Kind kind, std::unique_ptr<char[]> debug_name,
const AssemblerOptions& options, SourcePositionTable* source_positions) {
return base::make_unique<WasmHeapStubCompilationJob>(
isolate, call_descriptor, std::move(zone), graph, kind,
isolate, wasm_engine, call_descriptor, std::move(zone), graph, kind,
std::move(debug_name), options, source_positions);
}
CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
Isolate* isolate) {
UNREACHABLE();
}
CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(
&info_, isolate->GetTurboStatistics(), &zone_stats_));
&info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
}
if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
@ -1127,10 +1142,6 @@ CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
<< "\", \"source\":\"\",\n\"phases\":[";
}
pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
return CompilationJob::SUCCEEDED;
}
CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
pipeline_.ComputeScheduledGraph();
if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
return CompilationJob::SUCCEEDED;
@ -1853,10 +1864,9 @@ struct InstructionSelectionPhase {
FLAG_turbo_instruction_scheduling
? InstructionSelector::kEnableScheduling
: InstructionSelector::kDisableScheduling,
!data->isolate() || data->isolate()->serializer_enabled() ||
data->isolate()->IsGeneratingEmbeddedBuiltins()
? InstructionSelector::kDisableRootsRelativeAddressing
: InstructionSelector::kEnableRootsRelativeAddressing,
data->roots_relative_addressing_enabled()
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
data->info()->GetPoisoningMitigationLevel(),
data->info()->trace_turbo_json_enabled()
? InstructionSelector::kEnableTraceTurboJson
@ -2353,8 +2363,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
JumpOptimizationInfo jump_opt;
bool should_optimize_jumps =
isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
source_positions, &node_origins,
PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
nullptr, source_positions, &node_origins,
should_optimize_jumps ? &jump_opt : nullptr, options);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
@ -2399,10 +2409,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
// First run code generation on a copy of the pipeline, in order to be able to
// repeat it for jump optimization. The first run has to happen on a temporary
// pipeline to avoid deletion of zones on the main pipeline.
PipelineData second_data(&zone_stats, &info, isolate, data.graph(),
data.schedule(), data.source_positions(),
data.node_origins(), data.jump_optimization_info(),
options);
PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(),
data.graph(), data.schedule(),
data.source_positions(), data.node_origins(),
data.jump_optimization_info(), options);
second_data.set_verify_graph(FLAG_verify_csa);
PipelineImpl second_pipeline(&second_data);
second_pipeline.SelectInstructionsAndAssemble(call_descriptor);
@ -2548,8 +2558,8 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph);
PipelineData data(&zone_stats, info, isolate, graph, schedule, nullptr,
node_positions, nullptr, options);
PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
schedule, nullptr, node_positions, nullptr, options);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(

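A second recurring pattern in the pipeline changes: anything that used to be
derived from the isolate during instruction selection (which may now run off
the main thread) is sampled once when the pipeline data is constructed, and
only the cached value is read later. A standalone sketch of that pattern with
invented classes; StubCompilationJob is not the real PipelineData or job.

#include <cstdio>

struct Isolate {
  bool serializer_enabled() const { return false; }
  bool generating_embedded_builtins() const { return false; }
};

class StubCompilationJob {
 public:
  // Main thread: snapshot isolate-dependent configuration at construction.
  explicit StubCompilationJob(const Isolate& isolate)
      : roots_relative_addressing_enabled_(
            !isolate.serializer_enabled() &&
            !isolate.generating_embedded_builtins()) {}

  // Background thread: no Isolate parameter, only the cached flag.
  void Execute() const {
    std::printf("roots-relative addressing: %s\n",
                roots_relative_addressing_enabled_ ? "on" : "off");
  }

 private:
  const bool roots_relative_addressing_enabled_;
};

int main() {
  Isolate isolate;
  StubCompilationJob job(isolate);
  job.Execute();  // safe without touching the Isolate
}
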
View File

@ -61,9 +61,10 @@ class Pipeline : public AllStatic {
// Returns a new compilation job for a wasm heap stub.
static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob(
Isolate* isolate, CallDescriptor* call_descriptor,
std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind,
std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
Isolate* isolate, wasm::WasmEngine* wasm_engine,
CallDescriptor* call_descriptor, std::unique_ptr<Zone> zone, Graph* graph,
Code::Kind kind, std::unique_ptr<char[]> debug_name,
const AssemblerOptions& options,
SourcePositionTable* source_positions = nullptr);
// Run the pipeline on a machine graph and generate code.

View File

@ -6204,8 +6204,8 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
zone.get(), false, params + 1, CallDescriptor::kNoFlags);
return Pipeline::NewWasmHeapStubCompilationJob(
isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION,
std::move(debug_name), WasmAssemblerOptions());
isolate, wasm_engine, incoming, std::move(zone), graph,
Code::JS_TO_WASM_FUNCTION, std::move(debug_name), WasmAssemblerOptions());
}
std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
@ -6624,11 +6624,11 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
// Run the compilation job synchronously.
std::unique_ptr<OptimizedCompilationJob> job(
Pipeline::NewWasmHeapStubCompilationJob(
isolate, incoming, std::move(zone), graph, Code::JS_TO_JS_FUNCTION,
std::move(debug_name), AssemblerOptions::Default(isolate)));
isolate, isolate->wasm_engine(), incoming, std::move(zone), graph,
Code::JS_TO_JS_FUNCTION, std::move(debug_name),
AssemblerOptions::Default(isolate)));
if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
job->ExecuteJob() == CompilationJob::FAILED ||
if (job->ExecuteJob() == CompilationJob::FAILED ||
job->FinalizeJob(isolate) == CompilationJob::FAILED) {
return {};
}
@ -6680,11 +6680,11 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
// Run the compilation job synchronously.
std::unique_ptr<OptimizedCompilationJob> job(
Pipeline::NewWasmHeapStubCompilationJob(
isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
std::move(debug_name), AssemblerOptions::Default(isolate)));
isolate, isolate->wasm_engine(), incoming, std::move(zone), graph,
Code::C_WASM_ENTRY, std::move(debug_name),
AssemblerOptions::Default(isolate)));
if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
job->ExecuteJob() == CompilationJob::FAILED ||
if (job->ExecuteJob() == CompilationJob::FAILED ||
job->FinalizeJob(isolate) == CompilationJob::FAILED) {
return {};
}
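
Because the wrapper job above is constructed directly in a ready-to-execute
state, the synchronous callers drop PrepareJob and only run ExecuteJob
followed by FinalizeJob. A small sketch of that state handling; JobState and
StubJob are hypothetical, not the actual OptimizedCompilationJob interface.

#include <cassert>
#include <cstdio>

enum class JobState { kReadyToPrepare, kReadyToExecute, kReadyToFinalize };

class StubJob {
 public:
  // Wrapper jobs skip the prepare phase entirely by starting here.
  explicit StubJob(JobState initial = JobState::kReadyToExecute)
      : state_(initial) {}
  void Execute() {
    assert(state_ == JobState::kReadyToExecute);
    state_ = JobState::kReadyToFinalize;
  }
  void Finalize() {
    assert(state_ == JobState::kReadyToFinalize);
    std::printf("finalized\n");
  }
 private:
  JobState state_;
};

int main() {
  StubJob job;      // no PrepareJob call needed
  job.Execute();    // may happen on a background thread
  job.Finalize();   // main thread
}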

View File

@ -265,19 +265,15 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
Isolate* isolate, WasmEngine* wasm_engine, FunctionSig* sig, bool is_import,
const WasmFeatures& enabled_features)
: job_(compiler::NewJSToWasmCompilationJob(isolate, wasm_engine, sig,
: is_import_(is_import),
sig_(sig),
job_(compiler::NewJSToWasmCompilationJob(isolate, wasm_engine, sig,
is_import, enabled_features)) {}
JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
void JSToWasmWrapperCompilationUnit::Prepare(Isolate* isolate) {
CompilationJob::Status status = job_->PrepareJob(isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
}
void JSToWasmWrapperCompilationUnit::Execute() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
DCHECK_EQ(job_->state(), CompilationJob::State::kReadyToExecute);
CompilationJob::Status status = job_->ExecuteJob();
CHECK_EQ(status, CompilationJob::SUCCEEDED);
}
@ -300,7 +296,6 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
is_import, enabled_features);
unit.Prepare(isolate);
unit.Execute();
return unit.Finalize(isolate);
}

View File

@ -113,15 +113,19 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
const WasmFeatures& enabled_features);
~JSToWasmWrapperCompilationUnit();
void Prepare(Isolate* isolate);
void Execute();
Handle<Code> Finalize(Isolate* isolate);
bool is_import() const { return is_import_; }
FunctionSig* sig() const { return sig_; }
// Run a compilation unit synchronously.
static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
bool is_import);
private:
bool is_import_;
FunctionSig* sig_;
std::unique_ptr<OptimizedCompilationJob> job_;
};
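
The is_import()/sig() accessors added above let the finalization step recover
which export-wrapper slot a finished unit belongs to; one unit is created per
distinct (is_import, signature) pair. A toy sketch of that keying, with a
string standing in for the real FunctionSig and invented helper names.

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

using WrapperKey = std::pair<bool, std::string>;  // (imported?, signature)

struct ExportedFunction { bool imported; std::string sig; };

// One wrapper per distinct key, as in the export-wrapper unit collection;
// duplicate signatures reuse the same slot.
std::map<WrapperKey, int> CollectWrapperSlots(
    const std::vector<ExportedFunction>& exports) {
  std::map<WrapperKey, int> slot_of_key;
  for (const auto& exp : exports) {
    WrapperKey key{exp.imported, exp.sig};
    slot_of_key.emplace(key, static_cast<int>(slot_of_key.size()));
  }
  return slot_of_key;
}

int main() {
  auto slots =
      CollectWrapperSlots({{false, "i_i"}, {false, "i_i"}, {true, "v_v"}});
  std::printf("distinct wrappers: %zu\n", slots.size());  // prints 2
}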

View File

@ -381,7 +381,7 @@ class CompilationStateImpl {
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
// is invoked which triggers background compilation.
void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers);
void InitializeCompilationProgress(bool lazy_module, int num_wrappers);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@ -389,13 +389,24 @@ class CompilationStateImpl {
void AddCallback(CompilationState::callback_t);
// Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits(Vector<WasmCompilationUnit> baseline_units,
Vector<WasmCompilationUnit> top_tier_units);
void AddCompilationUnits(
Vector<WasmCompilationUnit> baseline_units,
Vector<WasmCompilationUnit> top_tier_units,
Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units);
void AddTopTierCompilationUnit(WasmCompilationUnit);
base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
int task_id, CompileBaselineOnly baseline_only);
std::shared_ptr<JSToWasmWrapperCompilationUnit>
GetNextJSToWasmWrapperCompilationUnit();
void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers);
void OnFinishedUnits(Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
void TriggerCallbacks(bool completes_baseline_compilation,
bool completes_top_tier_compilation);
void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
void UpdateDetectedFeatures(const WasmFeatures& detected);
@ -483,6 +494,13 @@ class CompilationStateImpl {
// tasks a fair chance to utilize the worker threads on a regular basis.
std::atomic<double> next_compilation_deadline_{0};
// Index of the next wrapper to compile in {js_to_wasm_wrapper_units_}.
std::atomic<int> js_to_wasm_wrapper_id_{0};
// Wrapper compilation units are stored in shared_ptrs so that they are kept
// alive by the tasks even if the NativeModule dies.
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units_;
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
@ -711,6 +729,11 @@ class CompilationUnitBuilder {
}
}
void AddJSToWasmWrapperUnit(
std::shared_ptr<JSToWasmWrapperCompilationUnit> unit) {
js_to_wasm_wrapper_units_.emplace_back(std::move(unit));
}
void AddTopTierUnit(int func_index) {
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
@ -729,9 +752,13 @@ class CompilationUnitBuilder {
}
bool Commit() {
if (baseline_units_.empty() && tiering_units_.empty()) return false;
compilation_state()->AddCompilationUnits(VectorOf(baseline_units_),
VectorOf(tiering_units_));
if (baseline_units_.empty() && tiering_units_.empty() &&
js_to_wasm_wrapper_units_.empty()) {
return false;
}
compilation_state()->AddCompilationUnits(
VectorOf(baseline_units_), VectorOf(tiering_units_),
VectorOf(js_to_wasm_wrapper_units_));
Clear();
return true;
}
@ -739,6 +766,7 @@ class CompilationUnitBuilder {
void Clear() {
baseline_units_.clear();
tiering_units_.clear();
js_to_wasm_wrapper_units_.clear();
}
private:
@ -750,6 +778,8 @@ class CompilationUnitBuilder {
const ExecutionTier default_tier_;
std::vector<WasmCompilationUnit> baseline_units_;
std::vector<WasmCompilationUnit> tiering_units_;
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units_;
};
void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes,
@ -909,6 +939,33 @@ void RecordStats(const Code code, Counters* counters) {
constexpr int kMainThreadTaskId = -1;
bool ExecuteJSToWasmWrapperCompilationUnits(
const std::shared_ptr<BackgroundCompileToken>& token) {
std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
int num_processed_wrappers = 0;
do {
// TODO(thibaudm): Reschedule the compilation task if it takes too long, so
// that the background thread is not blocked.
{
BackgroundCompileScope compile_scope(token);
if (compile_scope.cancelled()) return false;
wrapper_unit = compile_scope.compilation_state()
->GetNextJSToWasmWrapperCompilationUnit();
}
if (wrapper_unit) {
wrapper_unit->Execute();
++num_processed_wrappers;
}
} while (wrapper_unit);
{
BackgroundCompileScope compile_scope(token);
if (compile_scope.cancelled()) return false;
compile_scope.compilation_state()->OnFinishedJSToWasmWrapperUnits(
num_processed_wrappers);
}
return true;
}
// Run by the main thread and background tasks to take part in compilation.
// Returns whether any units were executed.
bool ExecuteCompilationUnits(
@ -917,6 +974,13 @@ bool ExecuteCompilationUnits(
TRACE_COMPILE("Compiling (task %d)...\n", task_id);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
// Execute JS to WASM wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
// triggered.
if (!ExecuteJSToWasmWrapperCompilationUnits(token)) {
return false;
}
const bool is_foreground = task_id == kMainThreadTaskId;
// The main thread uses task id 0, which might collide with one of the
// background tasks. This is fine, as it will only cause some contention on
@ -1049,6 +1113,35 @@ bool ExecuteCompilationUnits(
return true;
}
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
// Returns the number of units added.
int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
NativeModule* native_module,
CompilationUnitBuilder* builder,
const WasmFeatures& enabled_features) {
// Disable asynchronous wrapper compilation when builtins are not embedded,
// otherwise the isolate might be used after tear down to access builtins.
#ifdef V8_EMBEDDED_BUILTINS
std::unordered_set<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>> keys;
for (auto exp : native_module->module()->export_table) {
if (exp.kind != kExternalFunction) continue;
auto& function = native_module->module()->functions[exp.index];
JSToWasmWrapperKey key(function.imported, *function.sig);
if (keys.insert(key).second) {
auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
isolate, wasm_engine, function.sig, function.imported,
enabled_features);
builder->AddJSToWasmWrapperUnit(std::move(unit));
}
}
return static_cast<int>(keys.size());
#else
return 0;
#endif
}
// Returns the number of units added.
int AddImportWrapperUnits(NativeModule* native_module,
CompilationUnitBuilder* builder) {
@ -1074,7 +1167,7 @@ int AddImportWrapperUnits(NativeModule* native_module,
return static_cast<int>(keys.size());
}
void InitializeCompilationUnits(NativeModule* native_module) {
void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
const bool lazy_module = IsLazyModule(native_module->module());
@ -1098,8 +1191,11 @@ void InitializeCompilationUnits(NativeModule* native_module) {
}
}
int num_import_wrappers = AddImportWrapperUnits(native_module, &builder);
compilation_state->InitializeCompilationProgress(lazy_module,
num_import_wrappers);
int num_export_wrappers =
AddExportWrapperUnits(isolate, isolate->wasm_engine(), native_module,
&builder, WasmFeaturesFromIsolate(isolate));
compilation_state->InitializeCompilationProgress(
lazy_module, num_import_wrappers + num_export_wrappers);
builder.Commit();
}
@ -1201,7 +1297,7 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
}
// Initialize the compilation units and kick off background compile tasks.
InitializeCompilationUnits(native_module);
InitializeCompilationUnits(isolate, native_module);
// If tiering is disabled, the main thread can execute any unit (all of them
// are part of initial compilation). Otherwise, just execute baseline units.
@ -1287,12 +1383,17 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
CompileNativeModule(isolate, thrower, wasm_module, native_module.get());
if (thrower->error()) return {};
// Compile JS->wasm wrappers for exported functions.
int num_wrappers = MaxNumExportWrappers(native_module->module());
*export_wrappers_out =
isolate->factory()->NewFixedArray(num_wrappers, AllocationType::kOld);
#ifdef V8_EMBEDDED_BUILTINS
Impl(native_module->compilation_state())
->FinalizeJSToWasmWrappers(isolate, native_module->module(),
*export_wrappers_out);
#else
CompileJsToWasmWrappers(isolate, native_module->module(),
*export_wrappers_out);
#endif
// Log the code within the generated module for profiling.
native_module->LogWasmCodes(isolate);
@ -1366,6 +1467,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
ModuleDecoder decoder_;
AsyncCompileJob* job_;
WasmEngine* wasm_engine_;
std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
int num_functions_ = 0;
};
@ -1461,17 +1563,24 @@ void AsyncCompileJob::FinishCompile() {
}
isolate_->debug()->OnAfterCompile(script);
// We can only update the feature counts once the entire compile is done.
auto compilation_state =
Impl(module_object_->native_module()->compilation_state());
compilation_state->PublishDetectedFeatures(isolate_);
// TODO(bbudge) Allow deserialization without wrapper compilation, so we can
// just compile wrappers here.
if (!is_after_deserialization) {
// TODO(wasm): compiling wrappers should be made async.
CompileWrappers();
#ifndef V8_EMBEDDED_BUILTINS
CompileJsToWasmWrappers(
isolate_, module_object_->native_module()->module(),
handle(module_object_->export_wrappers(), isolate_));
#else
Handle<FixedArray> export_wrappers =
handle(module_object_->export_wrappers(), isolate_);
compilation_state->FinalizeJSToWasmWrappers(
isolate_, module_object_->module(), export_wrappers);
#endif
}
// We can only update the feature counts once the entire compile is done.
compilation_state->PublishDetectedFeatures(isolate_);
FinishModule();
}
@ -1791,7 +1900,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// then DoAsync would do the same as NextStep already.
// Add compilation units and kick off compilation.
InitializeCompilationUnits(job->native_module_.get());
InitializeCompilationUnits(job->isolate(), job->native_module_.get());
}
}
};
@ -1852,17 +1961,8 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
}
};
void AsyncCompileJob::CompileWrappers() {
// TODO(wasm): Compile all wrappers here, including the start function wrapper
// and the wrappers for the function table elements.
TRACE_COMPILE("(5) Compile wrappers...\n");
// Compile JS->wasm wrappers for exported functions.
CompileJsToWasmWrappers(isolate_, module_object_->native_module()->module(),
handle(module_object_->export_wrappers(), isolate_));
}
void AsyncCompileJob::FinishModule() {
TRACE_COMPILE("(6) Finish module...\n");
TRACE_COMPILE("(4) Finish module...\n");
AsyncCompileSucceeded(module_object_);
isolate_->wasm_engine()->RemoveCompileJob(this);
}
@ -1870,6 +1970,7 @@ void AsyncCompileJob::FinishModule() {
AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
: decoder_(job->enabled_features_),
job_(job),
wasm_engine_(job_->isolate_->wasm_engine()),
compilation_unit_builder_(nullptr) {}
void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
@ -1972,8 +2073,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
int num_import_wrappers =
AddImportWrapperUnits(native_module, compilation_unit_builder_.get());
compilation_state->InitializeCompilationProgress(lazy_module,
num_import_wrappers);
int num_export_wrappers = AddExportWrapperUnits(
job_->isolate_, wasm_engine_, native_module,
compilation_unit_builder_.get(), job_->enabled_features_);
compilation_state->InitializeCompilationProgress(
lazy_module, num_import_wrappers + num_export_wrappers);
return true;
}
@ -2144,8 +2248,8 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
void CompilationStateImpl::InitializeCompilationProgress(
bool lazy_module, int num_import_wrappers) {
void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
int num_wrappers) {
DCHECK(!failed());
auto enabled_features = native_module_->enabled_features();
auto* module = native_module_->module();
@ -2189,7 +2293,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
DCHECK_IMPLIES(lazy_module, outstanding_top_tier_functions_ == 0);
DCHECK_LE(0, outstanding_baseline_units_);
DCHECK_LE(outstanding_baseline_units_, outstanding_top_tier_functions_);
outstanding_baseline_units_ += num_import_wrappers;
outstanding_baseline_units_ += num_wrappers;
// Trigger callbacks if module needs no baseline or top tier compilation. This
// can be the case for an empty or fully lazy module.
@ -2214,15 +2318,50 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
void CompilationStateImpl::AddCompilationUnits(
Vector<WasmCompilationUnit> baseline_units,
Vector<WasmCompilationUnit> top_tier_units) {
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
Vector<WasmCompilationUnit> top_tier_units,
Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units) {
if (!baseline_units.empty() || !top_tier_units.empty()) {
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
}
js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
js_to_wasm_wrapper_units.begin(),
js_to_wasm_wrapper_units.end());
RestartBackgroundTasks();
}
void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
AddCompilationUnits({}, {&unit, 1});
AddCompilationUnits({}, {&unit, 1}, {});
}
std::shared_ptr<JSToWasmWrapperCompilationUnit>
CompilationStateImpl::GetNextJSToWasmWrapperCompilationUnit() {
int wrapper_id =
js_to_wasm_wrapper_id_.fetch_add(1, std::memory_order_relaxed);
if (wrapper_id < static_cast<int>(js_to_wasm_wrapper_units_.size())) {
return js_to_wasm_wrapper_units_[wrapper_id];
}
return nullptr;
}
void CompilationStateImpl::FinalizeJSToWasmWrappers(
Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers) {
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
// optimization we keep the code space unlocked to avoid repeated unlocking
// because many such wrapper are allocated in sequence below.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"FinalizeJSToWasmWrappers");
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
Handle<Code> code = unit->Finalize(isolate);
int wrapper_index =
GetExportWrapperIndex(module, unit->sig(), unit->is_import());
export_wrappers->set(wrapper_index, *code);
RecordStats(*code, isolate->counters());
}
}
base::Optional<WasmCompilationUnit>
@ -2312,25 +2451,38 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
DCHECK_LE(0, outstanding_baseline_units_);
}
// Trigger callbacks.
if (completes_baseline_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
if (outstanding_top_tier_functions_ == 0) {
completes_top_tier_compilation = true;
}
TriggerCallbacks(completes_baseline_compilation,
completes_top_tier_compilation);
}
}
void CompilationStateImpl::OnFinishedJSToWasmWrapperUnits(int num) {
if (num == 0) return;
base::MutexGuard guard(&callbacks_mutex_);
outstanding_baseline_units_ -= num;
bool completes_baseline_compilation = outstanding_baseline_units_ == 0;
TriggerCallbacks(completes_baseline_compilation, false);
}
void CompilationStateImpl::TriggerCallbacks(
bool completes_baseline_compilation, bool completes_top_tier_compilation) {
if (completes_baseline_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
}
// Clear the callbacks because no more events will be delivered.
callbacks_.clear();
if (outstanding_top_tier_functions_ == 0) {
completes_top_tier_compilation = true;
}
}
if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
}
// Clear the callbacks because no more events will be delivered.
callbacks_.clear();
}
}
void CompilationStateImpl::OnBackgroundTaskStopped(
@ -2378,6 +2530,11 @@ void CompilationStateImpl::RestartBackgroundTasks() {
if (failed()) return;
size_t max_num_restart = compilation_unit_queues_.GetTotalSize();
if (js_to_wasm_wrapper_id_ <
static_cast<int>(js_to_wasm_wrapper_units_.size())) {
max_num_restart +=
js_to_wasm_wrapper_units_.size() - js_to_wasm_wrapper_id_;
}
while (!available_task_ids_.empty() && max_num_restart-- > 0) {
int task_id = available_task_ids_.back();
@ -2417,7 +2574,6 @@ void CompilationStateImpl::SetError() {
}
namespace {
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
using JSToWasmWrapperQueue =
WrapperQueue<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>>;
using JSToWasmWrapperUnitMap =
@ -2462,7 +2618,6 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
isolate, isolate->wasm_engine(), function.sig, function.imported,
enabled_features);
unit->Prepare(isolate);
compilation_units.emplace(key, std::move(unit));
}
}
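
The module-compiler changes above hand wrapper units to background tasks
through an atomic index over a vector of shared_ptrs, so a worker can keep
its unit alive even if the owning module goes away, and each worker reports
how many units it finished for the baseline bookkeeping. A standalone sketch
of that claiming scheme with invented types (WrapperUnit, WrapperQueue), not
the real CompilationStateImpl.

#include <atomic>
#include <cstdio>
#include <memory>
#include <thread>
#include <vector>

struct WrapperUnit {
  int id;
  void Execute() { std::printf("wrapper %d compiled\n", id); }
};

struct WrapperQueue {
  std::vector<std::shared_ptr<WrapperUnit>> units;  // filled before workers start
  std::atomic<int> next{0};

  std::shared_ptr<WrapperUnit> GetNext() {
    int i = next.fetch_add(1, std::memory_order_relaxed);
    if (i < static_cast<int>(units.size())) return units[i];
    return nullptr;  // all units claimed
  }
};

int main() {
  WrapperQueue queue;
  for (int i = 0; i < 4; ++i) {
    queue.units.push_back(std::make_shared<WrapperUnit>(WrapperUnit{i}));
  }
  auto worker = [&queue] {
    int finished = 0;
    while (auto unit = queue.GetNext()) {
      unit->Execute();
      ++finished;
    }
    // The real code reports this count back so outstanding baseline units
    // can be decremented (OnFinishedJSToWasmWrapperUnits).
    std::printf("worker finished %d units\n", finished);
  };
  std::thread t1(worker), t2(worker);
  t1.join();
  t2.join();
}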

View File

@ -153,8 +153,6 @@ class AsyncCompileJob {
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
void CompileWrappers();
void FinishModule();
void StartForegroundTask();