diff --git a/src/heap/factory.cc b/src/heap/factory.cc
index 38c318b0e5..533d28cbd1 100644
--- a/src/heap/factory.cc
+++ b/src/heap/factory.cc
@@ -509,6 +509,8 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
   vector.set_length(length);
   vector.set_invocation_count(0);
   vector.set_profiler_ticks(0);
+  vector.set_placeholder0(0);
+  vector.reset_osr_state();
   vector.reset_flags();
   vector.set_closure_feedback_cell_array(*closure_feedback_cell_array);
 
diff --git a/src/objects/feedback-vector-inl.h b/src/objects/feedback-vector-inl.h
index 7dbf9a801b..e2445de393 100644
--- a/src/objects/feedback-vector-inl.h
+++ b/src/objects/feedback-vector-inl.h
@@ -123,6 +123,32 @@ void FeedbackVector::clear_invocation_count(RelaxedStoreTag tag) {
   set_invocation_count(0, tag);
 }
 
+int FeedbackVector::osr_urgency() const {
+  return OsrUrgencyBits::decode(osr_state());
+}
+
+void FeedbackVector::set_osr_urgency(int urgency) {
+  DCHECK(0 <= urgency && urgency <= FeedbackVector::kMaxOsrUrgency);
+  STATIC_ASSERT(FeedbackVector::kMaxOsrUrgency <= OsrUrgencyBits::kMax);
+  set_osr_state(OsrUrgencyBits::update(osr_state(), urgency));
+}
+
+void FeedbackVector::reset_osr_urgency() { set_osr_urgency(0); }
+
+void FeedbackVector::RequestOsrAtNextOpportunity() {
+  set_osr_urgency(kMaxOsrUrgency);
+}
+
+void FeedbackVector::reset_osr_state() { set_osr_state(0); }
+
+bool FeedbackVector::maybe_has_optimized_osr_code() const {
+  return MaybeHasOptimizedOsrCodeBit::decode(osr_state());
+}
+
+void FeedbackVector::set_maybe_has_optimized_osr_code(bool value) {
+  set_osr_state(MaybeHasOptimizedOsrCodeBit::update(osr_state(), value));
+}
+
 CodeT FeedbackVector::optimized_code() const {
   MaybeObject slot = maybe_optimized_code(kAcquireLoad);
   DCHECK(slot->IsWeakOrCleared());
@@ -155,6 +181,22 @@ void FeedbackVector::set_maybe_has_optimized_code(bool value) {
   set_flags(MaybeHasOptimizedCodeBit::update(flags(), value));
 }
 
+base::Optional<CodeT> FeedbackVector::GetOptimizedOsrCode(Isolate* isolate,
+                                                          FeedbackSlot slot) {
+  MaybeObject maybe_code = Get(isolate, slot);
+  if (maybe_code->IsCleared()) return {};
+
+  CodeT codet = CodeT::cast(maybe_code->GetHeapObject());
+  if (codet.marked_for_deoptimization()) {
+    // Clear the cached Code object if deoptimized.
+    // TODO(jgruber): Add tracing.
+    Set(slot, HeapObjectReference::ClearedValue(isolate));
+    return {};
+  }
+
+  return codet;
+}
+
 // Conversion from an integer index to either a slot or an ic slot.
 // static
 FeedbackSlot FeedbackVector::ToSlot(intptr_t index) {
diff --git a/src/objects/feedback-vector.cc b/src/objects/feedback-vector.cc
index e5da1d3830..7dcb2b80bc 100644
--- a/src/objects/feedback-vector.cc
+++ b/src/objects/feedback-vector.cc
@@ -411,6 +411,13 @@ void FeedbackVector::ClearOptimizedCode() {
   set_maybe_has_optimized_code(false);
 }
 
+void FeedbackVector::SetOptimizedOsrCode(FeedbackSlot slot, CodeT code) {
+  DCHECK(CodeKindIsOptimizedJSFunction(code.kind()));
+  DCHECK(!slot.IsInvalid());
+  Set(slot, HeapObjectReference::Weak(code));
+  set_maybe_has_optimized_osr_code(true);
+}
+
 void FeedbackVector::reset_tiering_state() {
   set_tiering_state(TieringState::kNone);
 }
@@ -423,8 +430,9 @@
 
 void FeedbackVector::reset_flags() {
   set_flags(TieringStateBits::encode(TieringState::kNone) |
+            MaybeHasOptimizedCodeBit::encode(false) |
             OsrTieringStateBit::encode(TieringState::kNone) |
-            MaybeHasOptimizedCodeBit::encode(false));
+            MaybeHasOptimizedOsrCodeBit::encode(false));
 }
 
 TieringState FeedbackVector::osr_tiering_state() {
diff --git a/src/objects/feedback-vector.h b/src/objects/feedback-vector.h
index 2aebbc3e32..244a80b250 100644
--- a/src/objects/feedback-vector.h
+++ b/src/objects/feedback-vector.h
@@ -197,6 +197,7 @@ class FeedbackVector
     : public TorqueGeneratedFeedbackVector<FeedbackVector, HeapObject> {
  public:
   NEVER_READ_ONLY_SPACE
+  DEFINE_TORQUE_GENERATED_OSR_STATE()
   DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
 
   STATIC_ASSERT(TieringState::kLastTieringState <= TieringStateBits::kMax);
@@ -224,6 +225,27 @@
   DECL_RELAXED_INT32_ACCESSORS(invocation_count)
   inline void clear_invocation_count(RelaxedStoreTag tag);
 
+  // The [osr_urgency] controls when OSR is attempted, and is incremented as
+  // the function becomes hotter. When the current loop depth is less than the
+  // osr_urgency, JumpLoop calls into runtime to attempt OSR optimization.
+  static constexpr int kMaxOsrUrgency = 6;
+  STATIC_ASSERT(kMaxOsrUrgency <= OsrUrgencyBits::kMax);
+  inline int osr_urgency() const;
+  inline void set_osr_urgency(int urgency);
+  inline void reset_osr_urgency();
+  inline void RequestOsrAtNextOpportunity();
+
+  // Whether this vector may contain cached optimized osr code for *any* slot.
+  // Represented internally as a bit that can be efficiently checked by
+  // generated code. May diverge from the state of the world; the invariant is
+  // that if `maybe_has_optimized_osr_code` is false, no optimized osr code
+  // exists.
+  inline bool maybe_has_optimized_osr_code() const;
+  inline void set_maybe_has_optimized_osr_code(bool value);
+
+  // The `osr_state` contains the osr_urgency and maybe_has_optimized_osr_code.
+  inline void reset_osr_state();
+
   inline CodeT optimized_code() const;
   // Whether maybe_optimized_code contains a cached Code object.
   inline bool has_optimized_code() const;
@@ -237,6 +259,12 @@
                                                  const char* reason);
   void ClearOptimizedCode();
 
+  // Optimized OSR'd code is cached in JumpLoop feedback vector slots. The
+  // slots either contain a Code object or the ClearedValue.
+  inline base::Optional<CodeT> GetOptimizedOsrCode(Isolate* isolate,
+                                                   FeedbackSlot slot);
+  void SetOptimizedOsrCode(FeedbackSlot slot, CodeT code);
+
   inline TieringState tiering_state() const;
   void set_tiering_state(TieringState state);
   void reset_tiering_state();
diff --git a/src/objects/feedback-vector.tq b/src/objects/feedback-vector.tq
index 97a2ca80ad..1df849fd4e 100644
--- a/src/objects/feedback-vector.tq
+++ b/src/objects/feedback-vector.tq
@@ -4,7 +4,7 @@
 
 type TieringState extends uint16 constexpr 'TieringState';
 
-bitfield struct FeedbackVectorFlags extends uint32 {
+bitfield struct FeedbackVectorFlags extends uint16 {
   tiering_state: TieringState: 3 bit;
   // Whether the maybe_optimized_code field contains a code object. 'maybe',
   // because they flag may lag behind the actual state of the world (it will be
@@ -12,17 +12,31 @@
   maybe_has_optimized_code: bool: 1 bit;
   // Just one bit, since only {kNone,kInProgress} are relevant for OSR.
   osr_tiering_state: TieringState: 1 bit;
-  all_your_bits_are_belong_to_jgruber: uint32: 27 bit;
+  all_your_bits_are_belong_to_jgruber: uint32: 11 bit;
+}
+
+bitfield struct OsrState extends uint8 {
+  // The layout is chosen s.t. osr_urgency and maybe_has_optimized_osr_code can
+  // be loaded with a single load (i.e. no masking required).
+  osr_urgency: uint32: 3 bit;
+  maybe_has_optimized_osr_code: bool: 1 bit;
+  // In order to have fast OSR checks in Ignition and Sparkplug, these bits
+  // should remain 0. That way, the OSR check can be implemented as a single
+  // comparison.
+  dont_use_these_bits_unless_beneficial: uint32: 4 bit;
 }
 
 @generateBodyDescriptor
 extern class FeedbackVector extends HeapObject {
   const length: int32;
   invocation_count: int32;
+  // TODO(jgruber): We don't need 32 bits to count profiler_ticks (something
+  // like 4 bits seems sufficient).
   profiler_ticks: int32;
-  // TODO(turboprop, v8:11010): This field could be removed by changing the
-  // tier up checks for Turboprop. If removing this field also check v8:9287.
-  // Padding was necessary for GCMole.
+  // TODO(jgruber): These bits are available and could be merged with
+  // profiler_ticks.
+  placeholder0: uint8;
+  osr_state: OsrState;
   flags: FeedbackVectorFlags;
   shared_function_info: SharedFunctionInfo;
   closure_feedback_cell_array: ClosureFeedbackCellArray;