From 3a961ad72e00f45b33a69ce78fe6dd38d8e55181 Mon Sep 17 00:00:00 2001
From: Maya Lekova <mslekova@chromium.org>
Date: Thu, 9 Jan 2020 16:50:55 +0100
Subject: [PATCH] [turbofan] Disable concurrent inlining for OSR

Bug: v8:7790
Change-Id: Idf066adcd5c3dca3004e2eaa0d8fa389755720af
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1991490
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65671}
---
 src/codegen/compiler.cc                       |  4 +-
 src/codegen/optimized-compilation-info.h      |  6 +-
 src/compiler/bytecode-graph-builder.cc        | 51 +++++++----
 src/compiler/bytecode-graph-builder.h         |  1 +
 src/compiler/js-call-reducer.cc               | 90 ++++++++++---------
 src/compiler/js-call-reducer.h                | 10 ++-
 src/compiler/js-heap-broker.cc                | 43 ++++-----
 src/compiler/js-heap-broker.h                 |  4 +-
 src/compiler/js-inlining-heuristic.cc         |  4 +-
 src/compiler/js-inlining-heuristic.h          |  2 +
 src/compiler/js-inlining.cc                   |  6 +-
 src/compiler/js-intrinsic-lowering.cc         |  9 +-
 src/compiler/js-intrinsic-lowering.h          |  8 +-
 .../js-native-context-specialization.cc       | 25 +++---
 .../js-native-context-specialization.h        | 10 ++-
 src/compiler/pipeline.cc                      | 67 ++++++++++----
 src/compiler/pipeline.h                       |  3 +-
 .../cctest/compiler/test-js-constant-cache.cc |  2 +-
 .../test-js-context-specialization.cc         |  3 +-
 .../cctest/compiler/test-js-typed-lowering.cc |  2 +-
 .../compiler/test-representation-change.cc    |  2 +-
 test/common/types-fuzz.h                      |  2 +-
 .../common-operator-reducer-unittest.cc       |  2 +-
 .../constant-folding-reducer-unittest.cc      |  2 +-
 test/unittests/compiler/graph-unittest.cc     |  2 +-
 .../js-intrinsic-lowering-unittest.cc         |  3 +-
 .../simplified-operator-reducer-unittest.cc   |  2 +-
 test/unittests/compiler/typer-unittest.cc     |  2 +-
 28 files changed, 229 insertions(+), 138 deletions(-)

diff --git a/src/codegen/compiler.cc b/src/codegen/compiler.cc
index 621b751c67..8d6a39f658 100644
--- a/src/codegen/compiler.cc
+++ b/src/codegen/compiler.cc
@@ -865,7 +865,9 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
   // tolerate the lack of a script without bytecode.
   DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
   std::unique_ptr<OptimizedCompilationJob> job(
-      compiler::Pipeline::NewCompilationJob(isolate, function, has_script));
+      compiler::Pipeline::NewCompilationJob(
+          isolate, function, has_script,
+          FLAG_concurrent_inlining && osr_offset.IsNone()));
   OptimizedCompilationInfo* compilation_info = job->compilation_info();

   compilation_info->SetOptimizingForOsr(osr_offset, osr_frame);
diff --git a/src/codegen/optimized-compilation-info.h b/src/codegen/optimized-compilation-info.h
index eeb871cbb6..af3514b828 100644
--- a/src/codegen/optimized-compilation-info.h
+++ b/src/codegen/optimized-compilation-info.h
@@ -64,7 +64,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
     kTraceHeapBroker = 1 << 17,
     kWasmRuntimeExceptionSupport = 1 << 18,
     kTurboControlFlowAwareAllocation = 1 << 19,
-    kTurboPreprocessRanges = 1 << 20
+    kTurboPreprocessRanges = 1 << 20,
+    kConcurrentInlining = 1 << 21,
   };

   // Construct a compilation info for optimized compilation.
@@ -93,6 +94,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {

   // Flags used by optimized compilation.
+  void MarkAsConcurrentInlining() { SetFlag(kConcurrentInlining); }
+  bool is_concurrent_inlining() const { return GetFlag(kConcurrentInlining); }
+
   void MarkAsTurboControlFlowAwareAllocation() {
     SetFlag(kTurboControlFlowAwareAllocation);
   }
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index d607c3c672..662ed1b9e3 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -40,6 +40,7 @@ class BytecodeGraphBuilder {
                        CallFrequency const& invocation_frequency,
                        SourcePositionTable* source_positions, int inlining_id,
                        BytecodeGraphBuilderFlags flags,
+                       JSTypeHintLowering::Flags type_hint_lowering_flags,
                        TickCounter* tick_counter);

   // Creates a graph by visiting bytecodes.
@@ -369,6 +370,10 @@ class BytecodeGraphBuilder {
   NativeContextRef native_context() const { return native_context_; }
   SharedFunctionInfoRef shared_info() const { return shared_info_; }

+  bool should_disallow_heap_access() const {
+    return flags_ & BytecodeGraphBuilderFlag::kConcurrentInlining;
+  }
+
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
   BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
 #undef DECLARE_VISIT_BYTECODE
@@ -433,6 +438,8 @@ class BytecodeGraphBuilder {

   SourcePosition const start_position_;

+  BytecodeGraphBuilderFlags const flags_;
+
   TickCounter* const tick_counter_;

   static int const kBinaryOperationHintIndex = 1;
@@ -940,7 +947,9 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
     FeedbackVectorRef const& feedback_vector, BailoutId osr_offset,
     JSGraph* jsgraph, CallFrequency const& invocation_frequency,
     SourcePositionTable* source_positions, int inlining_id,
-    BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
+    BytecodeGraphBuilderFlags flags,
+    JSTypeHintLowering::Flags type_hint_lowering_flags,
+    TickCounter* tick_counter)
     : broker_(broker),
       local_zone_(local_zone),
       jsgraph_(jsgraph),
@@ -948,11 +957,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
       shared_info_(shared_info),
       feedback_vector_(feedback_vector),
       invocation_frequency_(invocation_frequency),
-      type_hint_lowering_(
-          broker, jsgraph, feedback_vector,
-          (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
-              ? JSTypeHintLowering::kBailoutOnUninitialized
-              : JSTypeHintLowering::kNoFlags),
+      type_hint_lowering_(broker, jsgraph, feedback_vector,
+                          type_hint_lowering_flags),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array().parameter_count(), bytecode_array().register_count(),
@@ -962,8 +968,9 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
       bytecode_analysis_(broker_->GetBytecodeAnalysis(
           bytecode_array().object(), osr_offset,
          flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
-          FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
-                                   : SerializationPolicy::kSerializeIfNeeded)),
+          (flags & BytecodeGraphBuilderFlag::kConcurrentInlining)
+              ? SerializationPolicy::kAssumeSerialized
+              : SerializationPolicy::kSerializeIfNeeded)),
       environment_(nullptr),
       osr_(!osr_offset.IsNone()),
       currently_peeled_loop_offset_(-1),
@@ -980,8 +987,9 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
       state_values_cache_(jsgraph),
       source_positions_(source_positions),
       start_position_(shared_info.StartPosition(), inlining_id),
+      flags_(flags),
       tick_counter_(tick_counter) {
-  if (FLAG_concurrent_inlining) {
+  if (flags & BytecodeGraphBuilderFlag::kConcurrentInlining) {
     // With concurrent inlining on, the source position address doesn't change
     // because it's been copied from the heap.
     source_position_iterator_ = std::make_unique<SourcePositionTableIterator>(
@@ -1018,7 +1026,7 @@ FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(int slot_id) {
 }

 void BytecodeGraphBuilder::CreateGraph() {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   SourcePositionTable::Scope pos_scope(source_positions_, start_position_);

   // Set up the basic structure of the graph. Outputs for {Start} are the formal
@@ -1313,7 +1321,7 @@ void BytecodeGraphBuilder::VisitBytecodes() {
     VisitSingleBytecode();
   }

-  if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
+  if (!should_disallow_heap_access() && has_one_shot_bytecode) {
     // (For concurrent inlining this is done in the serializer instead.)
     isolate()->CountUsage(
         v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
@@ -2007,7 +2015,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
 }

 void BytecodeGraphBuilder::VisitCreateBlockContext() {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   ScopeInfoRef scope_info(
       broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   const Operator* op = javascript()->CreateBlockContext(scope_info.object());
@@ -2016,7 +2024,7 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   ScopeInfoRef scope_info(
       broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
@@ -2027,7 +2035,7 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   ScopeInfoRef scope_info(
       broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
@@ -2038,7 +2046,7 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
   Node* exception = environment()->LookupRegister(reg);
   ScopeInfoRef scope_info(
@@ -2050,7 +2058,7 @@ void BytecodeGraphBuilder::VisitCreateWithContext() {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   ScopeInfoRef scope_info(
@@ -2157,7 +2165,7 @@ void BytecodeGraphBuilder::VisitCloneObject() {
 }

 void BytecodeGraphBuilder::VisitGetTemplateObject() {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   FeedbackSource source =
       CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
   TemplateObjectDescriptionRef description(
@@ -4160,10 +4168,17 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
                             int inlining_id, BytecodeGraphBuilderFlags flags,
                             TickCounter* tick_counter) {
   DCHECK(broker->IsSerializedForCompilation(shared_info, feedback_vector));
+  JSTypeHintLowering::Flags type_hint_lowering_flags =
+      JSTypeHintLowering::kNoFlags;
+  if (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized) {
+    type_hint_lowering_flags |= JSTypeHintLowering::kBailoutOnUninitialized;
+  }
+
   BytecodeGraphBuilder builder(
       broker, local_zone, broker->target_native_context(), shared_info,
       feedback_vector, osr_offset, jsgraph, invocation_frequency,
-      source_positions, inlining_id, flags, tick_counter);
+      source_positions, inlining_id, flags, type_hint_lowering_flags,
+      tick_counter);
   builder.CreateGraph();
 }
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 03e900c214..d9ef634294 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -33,6 +33,7 @@ enum class BytecodeGraphBuilderFlag : uint8_t {
   // bytecode analysis.
   kAnalyzeEnvironmentLiveness = 1 << 1,
   kBailoutOnUninitialized = 1 << 2,
+  kConcurrentInlining = 1 << 3,
 };
 using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index 46ba313eaa..7ce757d1bf 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -1822,7 +1822,7 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op,
 }

 Reduction JSCallReducer::Reduce(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());

   switch (node->opcode()) {
     case IrOpcode::kJSConstruct:
@@ -1863,7 +1863,7 @@ void JSCallReducer::Finalize() {

 // ES6 section 22.1.1 The Array Constructor
 Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* target = NodeProperties::GetValueInput(node, 0);
@@ -1920,7 +1920,7 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {

 // ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
 Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_acess(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -2045,7 +2045,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {

 // ES section #sec-function.prototype.bind
 Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_acess(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -2078,7 +2078,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
   MapRef first_receiver_map(broker(), receiver_maps[0]);
   bool const is_constructor = first_receiver_map.is_constructor();

-  if (FLAG_concurrent_inlining && !first_receiver_map.serialized_prototype()) {
+  if (should_disallow_heap_access() &&
+      !first_receiver_map.serialized_prototype()) {
     TRACE_BROKER_MISSING(broker(),
                          "serialized prototype on map " << first_receiver_map);
     return inference.NoChange();
@@ -2087,7 +2088,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
   for (Handle<Map> const map : receiver_maps) {
     MapRef receiver_map(broker(), map);
-    if (FLAG_concurrent_inlining && !receiver_map.serialized_prototype()) {
+    if (should_disallow_heap_access() && !receiver_map.serialized_prototype()) {
       TRACE_BROKER_MISSING(broker(),
                            "serialized prototype on map " << receiver_map);
       return inference.NoChange();
@@ -2176,7 +2177,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {

 // ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
 Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_acess(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -2190,7 +2191,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
   HeapObjectMatcher m(target);
   if (m.HasValue()) {
     JSFunctionRef function = m.Ref(broker()).AsJSFunction();
-    if (FLAG_concurrent_inlining && !function.serialized()) {
+    if (should_disallow_heap_access() && !function.serialized()) {
      TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
       return NoChange();
     }
@@ -2267,7 +2268,7 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
   MapHandles const& object_maps = inference.GetMaps();

   MapRef candidate_map(broker(), object_maps[0]);
-  if (FLAG_concurrent_inlining && !candidate_map.serialized_prototype()) {
+  if (should_disallow_heap_access() && !candidate_map.serialized_prototype()) {
     TRACE_BROKER_MISSING(broker(), "prototype for map " << candidate_map);
     return inference.NoChange();
   }
@@ -2276,7 +2277,7 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {

   // Check if we can constant-fold the {candidate_prototype}.
   for (size_t i = 0; i < object_maps.size(); ++i) {
     MapRef object_map(broker(), object_maps[i]);
-    if (FLAG_concurrent_inlining && !object_map.serialized_prototype()) {
+    if (should_disallow_heap_access() && !object_map.serialized_prototype()) {
       TRACE_BROKER_MISSING(broker(), "prototype for map " << object_map);
       return inference.NoChange();
     }
@@ -2410,7 +2411,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {

 // ES #sec-object.prototype.isprototypeof
 Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -2712,8 +2713,7 @@ class IteratingArrayBuiltinHelper {
   IteratingArrayBuiltinHelper(Node* node, JSHeapBroker* broker,
                               JSGraph* jsgraph,
                               CompilationDependencies* dependencies)
-      : disallow_heap_access_(FLAG_concurrent_inlining),
-        receiver_(NodeProperties::GetValueInput(node, 1)),
+      : receiver_(NodeProperties::GetValueInput(node, 1)),
         effect_(NodeProperties::GetEffectInput(node)),
         control_(NodeProperties::GetControlInput(node)),
         inference_(broker, receiver_, effect_) {
@@ -2750,7 +2750,6 @@ class IteratingArrayBuiltinHelper {
   ElementsKind elements_kind() const { return elements_kind_; }

  private:
-  DisallowHeapAccessIf disallow_heap_access_;
   bool can_reduce_ = false;
   bool has_stability_dependency_ = false;
   Node* receiver_;
@@ -2764,6 +2763,7 @@ class IteratingArrayBuiltinHelper {

 Reduction JSCallReducer::ReduceArrayForEach(
     Node* node, const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2776,6 +2776,7 @@ Reduction JSCallReducer::ReduceArrayForEach(

 Reduction JSCallReducer::ReduceArrayReduce(
     Node* node, const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2789,6 +2790,7 @@ Reduction JSCallReducer::ReduceArrayReduce(

 Reduction JSCallReducer::ReduceArrayReduceRight(
     Node* node, const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2802,7 +2804,7 @@ Reduction JSCallReducer::ReduceArrayReduceRight(

 Reduction JSCallReducer::ReduceArrayMap(Node* node,
                                         const SharedFunctionInfoRef& shared) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2822,7 +2824,7 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,

 Reduction JSCallReducer::ReduceArrayFilter(
     Node* node, const SharedFunctionInfoRef& shared) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2842,7 +2844,7 @@ Reduction JSCallReducer::ReduceArrayFilter(

 Reduction JSCallReducer::ReduceArrayFind(Node* node,
                                          const SharedFunctionInfoRef& shared) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2857,7 +2859,7 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node,

 Reduction JSCallReducer::ReduceArrayFindIndex(
     Node* node, const SharedFunctionInfoRef& shared) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2910,7 +2912,7 @@ void JSCallReducer::RewirePostCallbackExceptionEdges(Node* check_throw,

 Reduction JSCallReducer::ReduceArrayEvery(Node* node,
                                           const SharedFunctionInfoRef& shared) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2926,7 +2928,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,

 // ES7 Array.prototype.inludes(searchElement[, fromIndex])
 // #sec-array.prototype.includes
 Reduction JSCallReducer::ReduceArrayIncludes(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2941,7 +2943,7 @@ Reduction JSCallReducer::ReduceArrayIncludes(Node* node) {

 // ES6 Array.prototype.indexOf(searchElement[, fromIndex])
 // #sec-array.prototype.indexof
 Reduction JSCallReducer::ReduceArrayIndexOf(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2955,7 +2957,7 @@ Reduction JSCallReducer::ReduceArrayIndexOf(Node* node) {

 Reduction JSCallReducer::ReduceArraySome(Node* node,
                                          const SharedFunctionInfoRef& shared) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
   if (!h.can_reduce()) return h.inference()->NoChange();

@@ -2970,7 +2972,7 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,

 Reduction JSCallReducer::ReduceCallApiFunction(
     Node* node, const SharedFunctionInfoRef& shared) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_acess(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -3431,7 +3433,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
     ObjectRef target_ref = m.Ref(broker());
     if (target_ref.IsJSFunction()) {
       JSFunctionRef function = target_ref.AsJSFunction();
-      if (FLAG_concurrent_inlining && !function.serialized()) {
+      if (should_disallow_heap_access() && !function.serialized()) {
" << function); return NoChange(); } @@ -3444,7 +3446,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) { return ReduceJSCall(node, function.shared()); } else if (target_ref.IsJSBoundFunction()) { JSBoundFunctionRef function = target_ref.AsJSBoundFunction(); - if (FLAG_concurrent_inlining && !function.serialized()) { + if (should_disallow_heap_access() && !function.serialized()) { TRACE_BROKER_MISSING(broker(), "data for function " << function); return NoChange(); } @@ -4025,7 +4027,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { if (target_ref.IsJSFunction()) { JSFunctionRef function = target_ref.AsJSFunction(); - if (FLAG_concurrent_inlining && !function.serialized()) { + if (should_disallow_heap_access() && !function.serialized()) { TRACE_BROKER_MISSING(broker(), "function, not serialized: " << function); return NoChange(); @@ -4086,7 +4088,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) { } } else if (target_ref.IsJSBoundFunction()) { JSBoundFunctionRef function = target_ref.AsJSBoundFunction(); - if (FLAG_concurrent_inlining && !function.serialized()) { + if (should_disallow_heap_access() && !function.serialized()) { TRACE_BROKER_MISSING(broker(), "function, not serialized: " << function); return NoChange(); @@ -4433,7 +4435,7 @@ void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind, // ES6 section 22.1.3.18 Array.prototype.push ( ) Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { - DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access()); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -4569,7 +4571,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { // ES6 section 22.1.3.17 Array.prototype.pop ( ) Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) { - DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access()); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -4704,7 +4706,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) { // ES6 section 22.1.3.22 Array.prototype.shift ( ) Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { - DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access()); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); @@ -4922,7 +4924,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { // ES6 section 22.1.3.23 Array.prototype.slice ( ) Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) { - DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access()); if (!FLAG_turbo_inline_array_builtins) return NoChange(); DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); @@ -5000,7 +5002,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) { // ES6 section 22.1.2.2 Array.isArray ( arg ) Reduction JSCallReducer::ReduceArrayIsArray(Node* node) { - DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access()); // We certainly know that undefined is not an array. 
   if (node->op()->ValueInputCount() < 3) {
@@ -5025,7 +5027,7 @@ Reduction JSCallReducer::ReduceArrayIsArray(Node* node) {
 }

 Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -5052,7 +5054,7 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {

 // ES #sec-%arrayiteratorprototype%.next
 Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -5707,7 +5709,7 @@ Node* JSCallReducer::CreateArtificialFrameState(
 }

 Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
   ConstructParameters const& p = ConstructParametersOf(node->op());
@@ -5869,7 +5871,7 @@ bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
   for (Handle<Map> map : receiver_maps) {
     MapRef receiver_map(broker(), map);
     if (!receiver_map.IsJSPromiseMap()) return false;
-    if (FLAG_concurrent_inlining && !receiver_map.serialized_prototype()) {
+    if (should_disallow_heap_access() && !receiver_map.serialized_prototype()) {
       TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
       return false;
     }
@@ -5934,7 +5936,7 @@ Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(

 // ES section #sec-promise.prototype.finally
 Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -6050,7 +6052,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
 }

 Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
@@ -6118,7 +6120,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {

 // ES section #sec-promise.resolve
 Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -6234,7 +6236,7 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
       jsgraph()->Constant(TYPE##_ELEMENTS -                                  \
                           FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));          \
   control = graph()->NewNode(common()->Branch(), check, control);           \
-  if (FLAG_concurrent_inlining) {                                           \
+  if (should_disallow_heap_access()) {                                      \
     values.push_back(jsgraph()->Constant(                                   \
         broker()->GetTypedArrayStringTag(TYPE##_ELEMENTS)));                \
   } else {                                                                  \
@@ -6746,7 +6748,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
 }

 Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   Node* value = node->op()->ValueInputCount() >= 3
                     ? NodeProperties::GetValueInput(node, 2)
@@ -6760,7 +6762,7 @@ Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {

 Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
     Node* node, InstanceType instance_type, FieldAccess const& access) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -7069,7 +7071,7 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) {
 }

 Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   if (FLAG_force_slow_path) return NoChange();
   if (node->op()->ValueInputCount() < 3) return NoChange();
@@ -7096,7 +7098,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
-  if (FLAG_concurrent_inlining) {
+  if (should_disallow_heap_access()) {
     // Obtain precomputed access infos from the broker.
     for (auto map : regexp_maps) {
       MapRef map_ref(broker(), map);
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 1ddf50e65e..d118f99247 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -40,7 +40,11 @@ class SimplifiedOperatorBuilder;

 class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
  public:
   // Flags that control the mode of operation.
-  enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
+  enum Flag {
+    kNoFlags = 0u,
+    kBailoutOnUninitialized = 1u << 0,
+    kConcurrentInlining = 1u << 1
+  };
   using Flags = base::Flags<Flag>;

   JSCallReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
@@ -246,6 +250,10 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   Flags flags() const { return flags_; }
   CompilationDependencies* dependencies() const { return dependencies_; }

+  bool should_disallow_heap_access() const {
+    return flags() & kConcurrentInlining;
+  }
+
   JSGraph* const jsgraph_;
   JSHeapBroker* const broker_;
   Zone* const temp_zone_;
diff --git a/src/compiler/js-heap-broker.cc b/src/compiler/js-heap-broker.cc
index 9d783e5344..6e6e7f6511 100644
--- a/src/compiler/js-heap-broker.cc
+++ b/src/compiler/js-heap-broker.cc
@@ -2343,7 +2343,7 @@ base::Optional<ObjectRef> ContextRef::get(int index,
 }

 JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
-                           bool tracing_enabled)
+                           bool tracing_enabled, bool is_concurrent_inlining)
     : isolate_(isolate),
       zone_(broker_zone),
       refs_(new (zone())
@@ -2351,6 +2351,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
       root_index_map_(isolate),
       array_and_object_prototypes_(zone()),
       tracing_enabled_(tracing_enabled),
+      is_concurrent_inlining_(is_concurrent_inlining),
       feedback_(zone()),
       bytecode_analyses_(zone()),
       property_access_infos_(zone()),
@@ -4598,7 +4599,7 @@ ProcessedFeedback const& JSHeapBroker::GetFeedback(

 FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
     FeedbackSource const& source) const {
-  if (FLAG_concurrent_inlining) {
+  if (is_concurrent_inlining_) {
     ProcessedFeedback const& processed = GetFeedback(source);
     return processed.slot_kind();
   }
@@ -4607,7 +4608,7 @@ FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
 }

 bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
-  return FLAG_concurrent_inlining
+  return (is_concurrent_inlining_)
              ? GetFeedback(source).IsInsufficient()
              : FeedbackNexus(source.vector, source.slot).IsUninitialized();
 }
@@ -4806,8 +4807,8 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
 BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
     FeedbackSource const& source) {
   ProcessedFeedback const& feedback =
-      FLAG_concurrent_inlining ? GetFeedback(source)
-                               : ProcessFeedbackForBinaryOperation(source);
+      (is_concurrent_inlining_) ? GetFeedback(source)
+                                : ProcessFeedbackForBinaryOperation(source);
   return feedback.IsInsufficient() ? BinaryOperationHint::kNone
                                    : feedback.AsBinaryOperation().value();
 }
@@ -4815,14 +4816,14 @@ BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
 CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation(
     FeedbackSource const& source) {
   ProcessedFeedback const& feedback =
-      FLAG_concurrent_inlining ? GetFeedback(source)
-                               : ProcessFeedbackForCompareOperation(source);
+      (is_concurrent_inlining_) ? GetFeedback(source)
+                                : ProcessFeedbackForCompareOperation(source);
   return feedback.IsInsufficient() ? CompareOperationHint::kNone
                                    : feedback.AsCompareOperation().value();
 }

 ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
-  ProcessedFeedback const& feedback = FLAG_concurrent_inlining
+  ProcessedFeedback const& feedback = (is_concurrent_inlining_)
                                           ? GetFeedback(source)
                                           : ProcessFeedbackForForIn(source);
   return feedback.IsInsufficient() ? ForInHint::kNone
@@ -4832,46 +4833,46 @@ ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
     FeedbackSource const& source, AccessMode mode,
     base::Optional<NameRef> static_name) {
-  return FLAG_concurrent_inlining
+  return (is_concurrent_inlining_)
              ? GetFeedback(source)
              : ProcessFeedbackForPropertyAccess(source, mode, static_name);
 }

 ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
     FeedbackSource const& source) {
-  return FLAG_concurrent_inlining ? GetFeedback(source)
-                                  : ProcessFeedbackForInstanceOf(source);
+  return (is_concurrent_inlining_) ? GetFeedback(source)
+                                   : ProcessFeedbackForInstanceOf(source);
 }

 ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
     FeedbackSource const& source) {
-  return FLAG_concurrent_inlining ? GetFeedback(source)
-                                  : ProcessFeedbackForCall(source);
+  return (is_concurrent_inlining_) ? GetFeedback(source)
+                                   : ProcessFeedbackForCall(source);
 }

 ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
     FeedbackSource const& source) {
-  return FLAG_concurrent_inlining ? GetFeedback(source)
-                                  : ProcessFeedbackForGlobalAccess(source);
+  return (is_concurrent_inlining_) ? GetFeedback(source)
+                                   : ProcessFeedbackForGlobalAccess(source);
 }

 ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral(
     FeedbackSource const& source) {
-  return FLAG_concurrent_inlining
+  return (is_concurrent_inlining_)
              ? GetFeedback(source)
              : ProcessFeedbackForArrayOrObjectLiteral(source);
 }

 ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
     FeedbackSource const& source) {
-  return FLAG_concurrent_inlining ? GetFeedback(source)
-                                  : ProcessFeedbackForRegExpLiteral(source);
+  return (is_concurrent_inlining_) ? GetFeedback(source)
+                                   : ProcessFeedbackForRegExpLiteral(source);
 }

 ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
     FeedbackSource const& source) {
-  return FLAG_concurrent_inlining ? GetFeedback(source)
-                                  : ProcessFeedbackForTemplateObject(source);
+  return (is_concurrent_inlining_) ? GetFeedback(source)
+                                   : ProcessFeedbackForTemplateObject(source);
 }

 ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
@@ -5094,7 +5095,7 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
   AccessInfoFactory factory(this, dependencies, zone());
   PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo(
       map.object(), name.object(), access_mode);
-  if (FLAG_concurrent_inlining) {
+  if (is_concurrent_inlining_) {
     CHECK(SerializingAllowed());
     TRACE(this, "Storing PropertyAccessInfo for "
                     << access_mode << " of property " << name << " on map "
diff --git a/src/compiler/js-heap-broker.h b/src/compiler/js-heap-broker.h
index 60b910e991..3f2b24a662 100644
--- a/src/compiler/js-heap-broker.h
+++ b/src/compiler/js-heap-broker.h
@@ -73,7 +73,8 @@ struct PropertyAccessTarget {

 class V8_EXPORT_PRIVATE JSHeapBroker {
  public:
-  JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled);
+  JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
+               bool is_concurrent_inlining);

   // The compilation target's native context. We need the setter because at
   // broker construction time we don't yet have the canonical handle.
@@ -242,6 +243,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
       array_and_object_prototypes_;
   BrokerMode mode_ = kDisabled;
   bool const tracing_enabled_;
+  bool const is_concurrent_inlining_;
   mutable StdoutStream trace_out_;
   unsigned trace_indentation_ = 0;
   PerIsolateCompilerCache* compiler_cache_ = nullptr;
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index 1790969888..7df4d8ab83 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -127,7 +127,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
 }

 Reduction JSInliningHeuristic::Reduce(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_acess(info_->is_concurrent_inlining());

   if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();

@@ -222,7 +222,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
 }

 void JSInliningHeuristic::Finalize() {
-  DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_acess(info_->is_concurrent_inlining());
   if (candidates_.empty()) return;  // Nothing to do without candidates.

   if (FLAG_trace_turbo_inlining) PrintCandidates();
diff --git a/src/compiler/js-inlining-heuristic.h b/src/compiler/js-inlining-heuristic.h
index 3830be4445..124c0719c0 100644
--- a/src/compiler/js-inlining-heuristic.h
+++ b/src/compiler/js-inlining-heuristic.h
@@ -22,6 +22,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
         candidates_(local_zone),
         seen_(local_zone),
         source_positions_(source_positions),
+        info_(info),
         jsgraph_(jsgraph),
         broker_(broker) {}

@@ -92,6 +93,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
   Candidates candidates_;
   ZoneSet<NodeId> seen_;
   SourcePositionTable* source_positions_;
+  OptimizedCompilationInfo* info_;
   JSGraph* const jsgraph_;
   JSHeapBroker* const broker_;
   int total_inlined_bytecode_size_ = 0;
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index f0850f01af..bfae66b3f9 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -419,7 +419,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   // always hold true.
   CHECK(shared_info->is_compiled());

-  if (!FLAG_concurrent_inlining && info_->is_source_positions_enabled()) {
+  if (!info_->is_concurrent_inlining() &&
+      info_->is_source_positions_enabled()) {
     SharedFunctionInfo::EnsureSourcePositionsAvailable(
         isolate(), shared_info->object());
   }
@@ -457,6 +458,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   if (info_->is_bailout_on_uninitialized()) {
     flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
   }
+  if (info_->is_concurrent_inlining()) {
+    flags |= BytecodeGraphBuilderFlag::kConcurrentInlining;
+  }
   {
     CallFrequency frequency = call.frequency();
     BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector,
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 8e3948a9ce..b47fe2e7e5 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -22,11 +22,14 @@ namespace internal {
 namespace compiler {

 JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
-                                         JSHeapBroker* broker)
-    : AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
+                                         JSHeapBroker* broker, Flags flags)
+    : AdvancedReducer(editor),
+      jsgraph_(jsgraph),
+      broker_(broker),
+      flags_(flags) {}

 Reduction JSIntrinsicLowering::Reduce(Node* node) {
-  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf no_heap_access(flags_ & kConcurrentInlining);

   if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
   const Runtime::Function* const f =
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 2331446c12..11613dcfec 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -31,7 +31,12 @@ class SimplifiedOperatorBuilder;
 class V8_EXPORT_PRIVATE JSIntrinsicLowering final
     : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
-  JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker);
+  // Flags that control the mode of operation.
+  enum Flag { kNoFlags = 0u, kConcurrentInlining = 1u << 0 };
+  using Flags = base::Flags<Flag>;
+
+  JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
+                      Flags flags);
   ~JSIntrinsicLowering() final = default;

   const char* reducer_name() const override { return "JSIntrinsicLowering"; }
@@ -90,6 +95,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final

   JSGraph* const jsgraph_;
   JSHeapBroker* const broker_;
+  Flags const flags_;
 };

 }  // namespace compiler
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index a7ea601857..eb938a87e1 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -69,7 +69,7 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
       type_cache_(TypeCache::Get()) {}

 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());

   switch (node->opcode()) {
     case IrOpcode::kJSAdd:
@@ -351,7 +351,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
   if (!m.HasValue()) return NoChange();
   JSFunctionRef function = m.Ref(broker()).AsJSFunction();
   MapRef function_map = function.map();
-  if (FLAG_concurrent_inlining && !function_map.serialized_prototype()) {
+  if (should_disallow_heap_access() && !function_map.serialized_prototype()) {
     TRACE_BROKER_MISSING(broker(), "data for map " << function_map);
     return NoChange();
   }
@@ -403,7 +403,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
     MapRef receiver_map = receiver_ref.map();

     PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone());
-    if (FLAG_concurrent_inlining) {
+    if (should_disallow_heap_access()) {
       access_info = broker()->GetPropertyAccessInfo(
           receiver_map,
           NameRef(broker(), isolate()->factory()->has_instance_symbol()),
@@ -529,7 +529,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
         all = false;
         break;
       }
-      if (FLAG_concurrent_inlining && !map.serialized_prototype()) {
+      if (should_disallow_heap_access() && !map.serialized_prototype()) {
        TRACE_BROKER_MISSING(broker(), "prototype data for map " << map);
        return kMayBeInPrototypeChain;
      }
@@ -608,7 +608,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
     // OrdinaryHasInstance on bound functions turns into a recursive invocation
     // of the instanceof operator again.
     JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
-    if (FLAG_concurrent_inlining && !function.serialized()) {
+    if (should_disallow_heap_access() && !function.serialized()) {
       TRACE_BROKER_MISSING(broker(), "data for JSBoundFunction " << function);
       return NoChange();
     }
@@ -627,7 +627,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
     // Optimize if we currently know the "prototype" property.
     JSFunctionRef function = m.Ref(broker()).AsJSFunction();
-    if (FLAG_concurrent_inlining && !function.serialized()) {
+    if (should_disallow_heap_access() && !function.serialized()) {
       TRACE_BROKER_MISSING(broker(), "data for JSFunction " << function);
       return NoChange();
     }
@@ -706,7 +706,7 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
-  if (!FLAG_concurrent_inlining) {
+  if (!should_disallow_heap_access()) {
     access_info_factory.ComputePropertyAccessInfos(
         resolution_maps, factory()->then_string(), AccessMode::kLoad,
         &access_infos);
@@ -1078,8 +1078,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
     if (map.is_deprecated()) continue;
     PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
         map, feedback.name(), access_mode, dependencies(),
-        FLAG_concurrent_inlining ? SerializationPolicy::kAssumeSerialized
-                                 : SerializationPolicy::kSerializeIfNeeded);
+        should_disallow_heap_access()
+            ? SerializationPolicy::kAssumeSerialized
+            : SerializationPolicy::kSerializeIfNeeded);
     access_infos_for_feedback.push_back(access_info);
   }

@@ -1326,7 +1327,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
       name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
     // Optimize "prototype" property of functions.
     JSFunctionRef function = object.AsJSFunction();
-    if (FLAG_concurrent_inlining && !function.serialized()) {
+    if (should_disallow_heap_access() && !function.serialized()) {
       TRACE_BROKER_MISSING(broker(), "data for function " << function);
       return NoChange();
     }
@@ -1622,7 +1623,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
     base::Optional<JSTypedArrayRef> typed_array =
         GetTypedArrayConstant(broker(), receiver);
     if (typed_array.has_value()) {
-      if (FLAG_concurrent_inlining && !typed_array->serialized()) {
+      if (should_disallow_heap_access() && !typed_array->serialized()) {
        TRACE_BROKER_MISSING(broker(), "data for typed array " << *typed_array);
        return NoChange();
      }
@@ -1839,7 +1840,7 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
 Reduction JSNativeContextSpecialization::ReducePropertyAccess(
     Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
     FeedbackSource const& source, AccessMode access_mode) {
-  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());

   DCHECK_EQ(key == nullptr, static_name.has_value());
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index c02d65b547..e2fd695387 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -44,7 +44,11 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
     : public AdvancedReducer {
  public:
   // Flags that control the mode of operation.
-  enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
+  enum Flag {
+    kNoFlags = 0u,
+    kBailoutOnUninitialized = 1u << 0,
+    kConcurrentInlining = 1u << 1
+  };
   using Flags = base::Flags<Flag>;

   JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph,
@@ -248,6 +252,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
   Zone* zone() const { return zone_; }
   Zone* shared_zone() const { return shared_zone_; }

+  bool should_disallow_heap_access() const {
+    return flags() & kConcurrentInlining;
+  }
+
   JSGraph* const jsgraph_;
   JSHeapBroker* const broker_;
   Flags const flags_;
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 714f4d2d66..605463e46c 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -132,7 +132,8 @@ class PipelineData {
   // For main entry point.
   PipelineData(ZoneStats* zone_stats, Isolate* isolate,
                OptimizedCompilationInfo* info,
-               PipelineStatistics* pipeline_statistics)
+               PipelineStatistics* pipeline_statistics,
+               bool is_concurrent_inlining)
       : isolate_(isolate),
         allocator_(isolate->allocator()),
         info_(info),
@@ -150,7 +151,8 @@ class PipelineData {
         codegen_zone_scope_(zone_stats_, kCodegenZoneName),
         codegen_zone_(codegen_zone_scope_.zone()),
         broker_(new JSHeapBroker(isolate_, info_->zone(),
-                                 info_->trace_heap_broker_enabled())),
+                                 info_->trace_heap_broker_enabled(),
+                                 is_concurrent_inlining)),
         register_allocation_zone_scope_(zone_stats_,
                                         kRegisterAllocationZoneName),
         register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -968,7 +970,8 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
  public:
   PipelineCompilationJob(Isolate* isolate,
                          Handle<SharedFunctionInfo> shared_info,
-                         Handle<JSFunction> function);
+                         Handle<JSFunction> function,
+                         bool is_concurrent_inlining);
   ~PipelineCompilationJob() final;

  protected:
@@ -993,7 +996,7 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {

 PipelineCompilationJob::PipelineCompilationJob(
     Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
-    Handle<JSFunction> function)
+    Handle<JSFunction> function, bool is_concurrent_inlining)
     // Note that the OptimizedCompilationInfo is not initialized at the time
     // we pass it to the CompilationJob constructor, but it is not
     // dereferenced there.
@@ -1006,10 +1009,9 @@ PipelineCompilationJob::PipelineCompilationJob(
           handle(Script::cast(shared_info->script()), isolate),
           compilation_info(), function->GetIsolate(), &zone_stats_)),
       data_(&zone_stats_, function->GetIsolate(), compilation_info(),
-            pipeline_statistics_.get()),
+            pipeline_statistics_.get(), is_concurrent_inlining),
       pipeline_(&data_),
-      linkage_(nullptr) {
-}
+      linkage_(nullptr) {}

 PipelineCompilationJob::~PipelineCompilationJob() {
 }
@@ -1030,6 +1032,9 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
   if (FLAG_turbo_inlining) {
     compilation_info()->MarkAsInliningEnabled();
   }
+  if (FLAG_concurrent_inlining && !compilation_info()->is_osr()) {
+    compilation_info()->MarkAsConcurrentInlining();
+  }

   // This is the bottleneck for computing and setting poisoning level in the
   // optimizing compiler.
@@ -1078,7 +1083,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(

   pipeline_.Serialize();

-  if (!FLAG_concurrent_inlining) {
+  if (!compilation_info()->is_concurrent_inlining()) {
     if (!pipeline_.CreateGraph()) {
       CHECK(!isolate->has_pending_exception());
       return AbortOptimization(BailoutReason::kGraphBuildingFailed);
@@ -1110,7 +1115,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
   // Ensure that the RuntimeCallStats table is only available during execution
   // and not during finalization as that might be on a different thread.
   PipelineExecuteJobScope scope(&data_, stats);
-  if (FLAG_concurrent_inlining) {
+  if (compilation_info()->is_concurrent_inlining()) {
     if (!pipeline_.CreateGraph()) {
       return AbortOptimization(BailoutReason::kGraphBuildingFailed);
     }
@@ -1323,6 +1328,9 @@ struct GraphBuilderPhase {
     if (data->info()->is_bailout_on_uninitialized()) {
       flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
     }
+    if (data->info()->is_concurrent_inlining()) {
+      flags |= BytecodeGraphBuilderFlag::kConcurrentInlining;
+    }

     JSFunctionRef closure(data->broker(), data->info()->closure());
     CallFrequency frequency(1.0f);
@@ -1347,11 +1355,15 @@ struct InliningPhase {
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->broker(), data->common(),
                                          data->machine(), temp_zone);
+    JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
+    if (data->info()->is_bailout_on_uninitialized()) {
+      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
+    }
+    if (data->info()->is_concurrent_inlining()) {
+      call_reducer_flags |= JSCallReducer::kConcurrentInlining;
+    }
     JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
-                               temp_zone,
-                               data->info()->is_bailout_on_uninitialized()
-                                   ? JSCallReducer::kBailoutOnUninitialized
-                                   : JSCallReducer::kNoFlags,
+                               temp_zone, call_reducer_flags,
                                data->dependencies());
     JSContextSpecialization context_specialization(
         &graph_reducer, data->jsgraph(), data->broker(),
@@ -1364,6 +1376,9 @@ struct InliningPhase {
     if (data->info()->is_bailout_on_uninitialized()) {
       flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
     }
+    if (data->info()->is_concurrent_inlining()) {
+      flags |= JSNativeContextSpecialization::kConcurrentInlining;
+    }
     // Passing the OptimizedCompilationInfo's shared zone here as
     // JSNativeContextSpecialization allocates out-of-heap objects
     // that need to live until code generation.
@@ -1373,8 +1388,15 @@ struct InliningPhase {
     JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
                                  data->jsgraph(), data->broker(),
                                  data->source_positions());
+
+    JSIntrinsicLowering::Flags intrinsic_lowering_flags =
+        JSIntrinsicLowering::kNoFlags;
+    if (data->info()->is_concurrent_inlining()) {
+      intrinsic_lowering_flags |= JSIntrinsicLowering::kConcurrentInlining;
+    }
     JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
-                                           data->broker());
+                                           data->broker(),
+                                           intrinsic_lowering_flags);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &checkpoint_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
@@ -2349,7 +2371,7 @@ void PipelineImpl::Serialize() {
   }

   data->broker()->SetTargetNativeContextRef(data->native_context());
-  if (FLAG_concurrent_inlining) {
+  if (data->info()->is_concurrent_inlining()) {
     Run();
     Run();
     data->broker()->StopSerializing();
@@ -2389,7 +2411,7 @@ bool PipelineImpl::CreateGraph() {

   // Run the type-sensitive lowerings and optimizations on the graph.
   {
-    if (!FLAG_concurrent_inlining) {
+    if (!data->info()->is_concurrent_inlining()) {
       Run();
       Run();
       data->broker()->StopSerializing();
@@ -2786,7 +2808,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
   std::unique_ptr<PipelineStatistics> pipeline_statistics(
       CreatePipelineStatistics(Handle