Reland "[osr] Use the new OSR cache"
This is a reland of commit 91da38831d
Fixed: Use an X register for JumpIfCodeTIsMarkedForDeoptimization
on arm64.
Original change's description:
> [osr] Use the new OSR cache
>
> This CL switches over our OSR system to be based on the feedback
> vector osr caches.
>
> - OSRing to Sparkplug is fully separated from OSR urgency. If
> SP code exists, we simply jump to it, no need to maintain an
> installation request.
> - Each JumpLoop checks its dedicated FeedbackVector cache slot.
> If a valid target code object exists, we enter it *without*
> calling into runtime to fetch the code object.
> - Finally, OSR urgency still remains as the heuristic for
> requesting Turbofan OSR compile jobs. Note it no longer has a
> double purpose of being a generic untargeted installation
> request.
>
> With the new system in place, we can remove now-unnecessary
> hacks:
>
> - Early OSR tierup is replaced by the standard OSR system. Any
> present OSR code is automatically entered.
> - The synchronous OSR compilation fallback is removed. With
> precise installation (= per-JumpLoop-bytecode) we no longer
> have the problem of 'getting unlucky' with JumpLoop/cache entry
> mismatches. Execution has moved on while compiling? Simply spawn
> a new concurrent compile job.
> - Remove the synchronous (non-OSR) Turbofan compile request now
> that we always enter available OSR code as early as possible.
> - Tiering into Sparkplug no longer messes with OSR state.
>
> Bug: v8:12161
> Change-Id: I0a85e53d363504b7dac174dbaf69c03c35e66700
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3596167
> Commit-Queue: Jakob Linke <jgruber@chromium.org>
> Auto-Submit: Jakob Linke <jgruber@chromium.org>
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Commit-Queue: Leszek Swirski <leszeks@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80147}
Bug: v8:12161
Change-Id: Ib3597cf1d99cdb5d0f2c5ac18e311914f376231d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3606232
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80167}
parent 07601b124e
commit 9145388055
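The mechanism described in the commit message can be modelled in a few lines of standalone C++. This is an illustrative sketch only, not V8 code: the types and names below are invented for the example. In V8, the slot is the FeedbackVector element for the JumpLoop's feedback slot, it holds a weak reference to a CodeT object, and the TryLoadOptimizedOsrCode helper added below for each baseline port reads it without calling into the runtime.

// Standalone model of a per-JumpLoop OSR cache slot (illustrative only).
#include <cassert>
#include <optional>

struct Code {
  bool marked_for_deoptimization = false;
};

// One slot per JumpLoop feedback slot; nullptr stands in for ClearedValue().
struct OsrCacheSlot {
  Code* code = nullptr;

  // Mirrors the TryLoadOptimizedOsrCode pattern: return the code if present
  // and still valid, and clear the slot if it was marked for deoptimization.
  std::optional<Code*> TryLoad() {
    if (code == nullptr) return std::nullopt;  // fall through, no OSR code
    if (code->marked_for_deoptimization) {
      code = nullptr;                          // store the cleared value
      return std::nullopt;
    }
    return code;                               // enter OSR code directly
  }
};

int main() {
  Code osr_code;
  OsrCacheSlot slot{&osr_code};
  assert(slot.TryLoad().has_value());   // hit: enter without a runtime call
  osr_code.marked_for_deoptimization = true;
  assert(!slot.TryLoad().has_value());  // miss: slot cleared on deopt
  assert(slot.code == nullptr);
}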
@@ -1784,9 +1784,6 @@ filegroup(
        "src/objects/ordered-hash-table-inl.h",
        "src/objects/ordered-hash-table.cc",
        "src/objects/ordered-hash-table.h",
        "src/objects/osr-optimized-code-cache-inl.h",
        "src/objects/osr-optimized-code-cache.cc",
        "src/objects/osr-optimized-code-cache.h",
        "src/objects/primitive-heap-object-inl.h",
        "src/objects/primitive-heap-object.h",
        "src/objects/promise-inl.h",

BUILD.gn
@@ -3259,8 +3259,6 @@ v8_header_set("v8_internal_headers") {
    "src/objects/option-utils.h",
    "src/objects/ordered-hash-table-inl.h",
    "src/objects/ordered-hash-table.h",
    "src/objects/osr-optimized-code-cache-inl.h",
    "src/objects/osr-optimized-code-cache.h",
    "src/objects/primitive-heap-object-inl.h",
    "src/objects/primitive-heap-object.h",
    "src/objects/promise-inl.h",
@@ -4405,7 +4403,6 @@ v8_source_set("v8_base_without_compiler") {
    "src/objects/objects.cc",
    "src/objects/option-utils.cc",
    "src/objects/ordered-hash-table.cc",
    "src/objects/osr-optimized-code-cache.cc",
    "src/objects/property-descriptor.cc",
    "src/objects/property.cc",
    "src/objects/scope-info.cc",
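The build changes above drop the src/objects/osr-optimized-code-cache* files from the source lists; with OSR code now cached directly on feedback vector slots, the separate per-native-context OSROptimizedCodeCache appears to be no longer needed (see the compiler.cc hunks at the end of this diff, where its use in the cache wrapper is removed).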
@@ -407,6 +407,32 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
  __ str(value, FieldMemOperand(target, offset));
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance) {
  Label fallthrough;
  LoadTaggedPointerField(scratch_and_result, feedback_vector,
                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);
    Register scratch = temps.AcquireScratch();
    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
    __ b(eq, on_result);
    __ mov(scratch, __ ClearedValue());
    StoreTaggedFieldNoWriteBarrier(
        feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
        scratch);
  }

  __ bind(&fallthrough);
  Move(scratch_and_result, 0);
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
@@ -479,6 +479,34 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance) {
  Label fallthrough, clear_slot;
  LoadTaggedPointerField(scratch_and_result, feedback_vector,
                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);
    __ JumpIfCodeTIsMarkedForDeoptimization(
        scratch_and_result, temps.AcquireScratch(), &clear_slot);
    __ B(on_result);
  }

  __ bind(&clear_slot);
  __ Mov(scratch_and_result, __ ClearedValue());
  StoreTaggedFieldNoWriteBarrier(
      feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
      scratch_and_result);

  __ bind(&fallthrough);
  Move(scratch_and_result, 0);
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
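The arm and arm64 implementations above differ only in control flow: arm tests the flag and conditionally branches, while arm64 uses JumpIfCodeTIsMarkedForDeoptimization with an explicit clear_slot label. The scratch register acquired for that call is presumably what the reland note at the top ("Use an X register for JumpIfCodeTIsMarkedForDeoptimization on arm64") refers to: the helper loads a tagged pointer through the scratch before taking its 32-bit W view, so it needs a full X register.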
@@ -132,6 +132,11 @@ void BaselineAssembler::StoreRegister(interpreter::Register output,
  Move(output, value);
}

template <typename Field>
void BaselineAssembler::DecodeField(Register reg) {
  __ DecodeField<Field>(reg);
}

SaveAccumulatorScope::SaveAccumulatorScope(BaselineAssembler* assembler)
    : assembler_(assembler) {
  assembler_->Push(kInterpreterAccumulatorRegister);
@@ -40,6 +40,9 @@ class BaselineAssembler {
  inline void Trap();
  inline void DebugBreak();

  template <typename Field>
  inline void DecodeField(Register reg);

  inline void Bind(Label* label);
  // Binds the label without marking it as a valid jump target.
  // This is only useful, when the position is already marked as a valid jump
@@ -167,6 +170,13 @@ class BaselineAssembler {
                                  int32_t index);
  inline void LoadPrototype(Register prototype, Register object);

  // Falls through and sets scratch_and_result to 0 on failure, jumps to
  // on_result on success.
  inline void TryLoadOptimizedOsrCode(Register scratch_and_result,
                                      Register feedback_vector,
                                      FeedbackSlot slot, Label* on_result,
                                      Label::Distance distance);

  // Loads the feedback cell from the function, and sets flags on add so that
  // we can compare afterward.
  inline void AddToInterruptBudgetAndJumpIfNotExceeded(
@@ -73,10 +73,6 @@ class BaselineCompilerTask {
  }

  shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
  if (V8_LIKELY(FLAG_use_osr)) {
    shared_function_info_->GetBytecodeArray(isolate)
        .RequestOsrAtNextOpportunity();
  }
  if (FLAG_trace_baseline_concurrent_compilation) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    std::stringstream ss;
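Dropping the RequestOsrAtNextOpportunity call after concurrent Sparkplug compilation matches the "Tiering into Sparkplug no longer messes with OSR state" bullet in the commit message: newly installed baseline code is reached through the normal OSR path rather than by artificially raising the OSR urgency.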
@@ -1929,24 +1929,30 @@ void BaselineCompiler::VisitJumpLoop() {
  {
    ASM_CODE_COMMENT_STRING(&masm_, "OSR Check Armed");
    using D = BaselineOnStackReplacementDescriptor;
    Register osr_urgency_and_install_target =
        D::OsrUrgencyAndInstallTargetRegister();
    __ LoadRegister(osr_urgency_and_install_target,
                    interpreter::Register::bytecode_array());
    __ LoadWord16FieldZeroExtend(
        osr_urgency_and_install_target, osr_urgency_and_install_target,
        BytecodeArray::kOsrUrgencyAndInstallTargetOffset);
    BaselineAssembler::ScratchRegisterScope temps(&basm_);
    Register feedback_vector = temps.AcquireScratch();
    Register osr_state = temps.AcquireScratch();
    LoadFeedbackVector(feedback_vector);
    __ LoadWord8Field(osr_state, feedback_vector,
                      FeedbackVector::kOsrStateOffset);
    const int loop_depth = iterator().GetImmediateOperand(1);
    __ JumpIfImmediate(Condition::kUnsignedLessThanEqual,
                       osr_urgency_and_install_target, loop_depth,
                       &osr_not_armed, Label::kNear);
    static_assert(FeedbackVector::MaybeHasOptimizedOsrCodeBit::encode(true) >
                  FeedbackVector::kMaxOsrUrgency);
    __ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_state, loop_depth,
                  &osr_not_armed, Label::kNear);

    const int encoded_current_offset =
        BytecodeArray::OsrInstallTargetFor(
            BytecodeOffset{iterator().current_offset()})
        << BytecodeArray::OsrInstallTargetBits::kShift;
    CallBuiltin<Builtin::kBaselineOnStackReplacement>(
        loop_depth, encoded_current_offset, osr_urgency_and_install_target);
    Label osr;
    Register maybe_target_code = D::MaybeTargetCodeRegister();
    DCHECK(!AreAliased(maybe_target_code, feedback_vector, osr_state));
    __ TryLoadOptimizedOsrCode(maybe_target_code, feedback_vector,
                               iterator().GetSlotOperand(2), &osr,
                               Label::kNear);
    __ DecodeField<FeedbackVector::OsrUrgencyBits>(osr_state);
    __ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_state, loop_depth,
                  &osr_not_armed, Label::kNear);

    __ Bind(&osr);
    CallBuiltin<Builtin::kBaselineOnStackReplacement>(maybe_target_code);
  }

  __ Bind(&osr_not_armed);
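The single JumpIfByte comparison in the new JumpLoop sequence works because of how the osr_state byte is packed: the OSR urgency occupies the low bits and MaybeHasOptimizedOsrCodeBit sits above them (the static_assert above guarantees that ordering), so any state that warrants the slow path compares unsigned-greater than the loop depth. A standalone sketch with assumed bit widths; the exact constants are not taken from V8:

// Model of the packed osr_state check at JumpLoop (bit widths are assumed).
#include <cassert>
#include <cstdint>

constexpr unsigned kOsrUrgencyBitWidth = 3;  // assumption for the sketch
constexpr uint8_t kMaxOsrUrgency = (1u << kOsrUrgencyBitWidth) - 1;
constexpr uint8_t kMaybeHasOptimizedOsrCodeBit = 1u << kOsrUrgencyBitWidth;
static_assert(kMaybeHasOptimizedOsrCodeBit > kMaxOsrUrgency,
              "the cache bit must dominate any urgency value");

// One unsigned compare covers both triggers: either the urgency exceeds the
// loop depth, or the cache bit is set (which exceeds every possible depth).
bool OsrCheckArmed(uint8_t osr_state, int loop_depth) {
  return osr_state > loop_depth;
}

int main() {
  assert(!OsrCheckArmed(2, 3));                            // not armed
  assert(OsrCheckArmed(5, 3));                             // urgency high enough
  assert(OsrCheckArmed(kMaybeHasOptimizedOsrCodeBit, 3));  // cached OSR code
}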
@@ -8,6 +8,7 @@
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/interface-descriptors.h"
#include "src/objects/feedback-vector.h"

namespace v8 {
namespace internal {
@@ -378,6 +379,32 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
  __ mov(FieldOperand(target, offset), value);
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance distance) {
  Label fallthrough;
  LoadTaggedPointerField(scratch_and_result, feedback_vector,
                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, &fallthrough);

  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    ScratchRegisterScope temps(this);
    Register scratch2 = temps.AcquireScratch();
    DCHECK(!AreAliased(scratch_and_result, scratch2));
    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch2);
    __ j(equal, on_result, distance);
    __ mov(FieldOperand(feedback_vector,
                        FeedbackVector::OffsetOfElementAt(slot.ToInt())),
           __ ClearedValue());
  }

  __ bind(&fallthrough);
  __ Move(scratch_and_result, 0);
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
@@ -8,6 +8,7 @@
#include "src/base/macros.h"
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/x64/register-x64.h"
#include "src/objects/feedback-vector.h"

namespace v8 {
namespace internal {
@@ -373,6 +374,31 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
  __ StoreTaggedField(FieldOperand(target, offset), value);
}

void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                Register feedback_vector,
                                                FeedbackSlot slot,
                                                Label* on_result,
                                                Label::Distance distance) {
  Label fallthrough;
  LoadTaggedPointerField(scratch_and_result, feedback_vector,
                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
  __ LoadWeakValue(scratch_and_result, &fallthrough);

  // Is it marked_for_deoptimization? If yes, clear the slot.
  {
    DCHECK(!AreAliased(scratch_and_result, kScratchRegister));
    __ TestCodeTIsMarkedForDeoptimization(scratch_and_result, kScratchRegister);
    __ j(equal, on_result, distance);
    __ StoreTaggedField(
        FieldOperand(feedback_vector,
                     FeedbackVector::OffsetOfElementAt(slot.ToInt())),
        __ ClearedValue());
  }

  __ bind(&fallthrough);
  __ Move(scratch_and_result, 0);
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
@ -903,12 +903,12 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
|
||||
|
||||
// Check if the optimized code is marked for deopt. If it is, call the
|
||||
// runtime to clear it.
|
||||
__ ldr(scratch,
|
||||
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
|
||||
__ ldr(scratch,
|
||||
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
|
||||
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
|
||||
__ b(ne, &heal_optimized_code_slot);
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry,
|
||||
temps.Acquire());
|
||||
__ b(ne, &heal_optimized_code_slot);
|
||||
}
|
||||
|
||||
// Optimized code is good, get it into the closure and link the closure
|
||||
// into the optimized functions list, then tail call the optimized code.
|
||||
@ -1067,15 +1067,24 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
|
||||
|
||||
namespace {
|
||||
|
||||
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
|
||||
Register scratch) {
|
||||
// Reset the bytecode age and OSR state (optimized to a single write).
|
||||
static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
|
||||
void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array,
|
||||
Register scratch) {
|
||||
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
|
||||
DCHECK(!AreAliased(bytecode_array, scratch));
|
||||
__ mov(scratch, Operand(0));
|
||||
__ str(scratch,
|
||||
FieldMemOperand(bytecode_array,
|
||||
BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
|
||||
__ strh(scratch,
|
||||
FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset));
|
||||
}
|
||||
|
||||
void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
|
||||
Register feedback_vector, Register scratch) {
|
||||
DCHECK(!AreAliased(feedback_vector, scratch));
|
||||
__ ldrb(scratch,
|
||||
FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
|
||||
__ and_(scratch, scratch,
|
||||
Operand(FeedbackVector::MaybeHasOptimizedOsrCodeBit::kMask));
|
||||
__ strb(scratch,
|
||||
FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
@ -1116,6 +1125,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
&has_optimized_code_or_state);
|
||||
}
|
||||
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
|
||||
}
|
||||
|
||||
// Increment invocation count for the function.
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
@ -1152,7 +1166,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
ResetBytecodeAgeAndOsrState(masm, bytecodeArray, temps.Acquire());
|
||||
ResetBytecodeAge(masm, bytecodeArray, temps.Acquire());
|
||||
}
|
||||
__ Push(argc, bytecodeArray);
|
||||
|
||||
@ -1276,6 +1290,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
LoadTieringStateAndJumpIfNeedsProcessing(
|
||||
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
|
||||
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
|
||||
}
|
||||
|
||||
Label not_optimized;
|
||||
__ bind(¬_optimized);
|
||||
|
||||
@ -1293,7 +1312,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
FrameScope frame_scope(masm, StackFrame::MANUAL);
|
||||
__ PushStandardFrame(closure);
|
||||
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r9);
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r9);
|
||||
|
||||
// Load the initial bytecode offset.
|
||||
__ mov(kInterpreterBytecodeOffsetRegister,
|
||||
@ -1822,37 +1841,15 @@ enum class OsrSourceTier {
|
||||
};
|
||||
|
||||
void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
Register current_loop_depth,
|
||||
Register encoded_current_bytecode_offset,
|
||||
Register osr_urgency_and_install_target) {
|
||||
static constexpr Register scratch = r3;
|
||||
DCHECK(!AreAliased(scratch, current_loop_depth,
|
||||
encoded_current_bytecode_offset,
|
||||
osr_urgency_and_install_target));
|
||||
// OSR based on urgency, i.e. is the OSR urgency greater than the current
|
||||
// loop depth?
|
||||
Label try_osr;
|
||||
STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
|
||||
Register urgency = scratch;
|
||||
__ and_(urgency, osr_urgency_and_install_target,
|
||||
Operand(BytecodeArray::OsrUrgencyBits::kMask));
|
||||
__ cmp(urgency, current_loop_depth);
|
||||
__ b(hi, &try_osr);
|
||||
|
||||
// OSR based on the install target offset, i.e. does the current bytecode
|
||||
// offset match the install target offset?
|
||||
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
|
||||
Register install_target = osr_urgency_and_install_target;
|
||||
__ and_(install_target, osr_urgency_and_install_target, Operand(kMask));
|
||||
__ cmp(install_target, encoded_current_bytecode_offset);
|
||||
__ b(eq, &try_osr);
|
||||
|
||||
// Neither urgency nor the install target triggered, return to the caller.
|
||||
// Note: the return value must be nullptr or a valid Code object.
|
||||
__ Move(r0, Operand(0));
|
||||
__ Ret(0);
|
||||
|
||||
__ bind(&try_osr);
|
||||
Register maybe_target_code) {
|
||||
Label jump_to_optimized_code;
|
||||
{
|
||||
// If maybe_target_code is not null, no need to call into runtime. A
|
||||
// precondition here is: if maybe_target_code is a Code object, it must NOT
|
||||
// be marked_for_deoptimization (callers must ensure this).
|
||||
__ cmp(maybe_target_code, Operand(Smi::zero()));
|
||||
__ b(ne, &jump_to_optimized_code);
|
||||
}
|
||||
|
||||
ASM_CODE_COMMENT(masm);
|
||||
{
|
||||
@ -1861,12 +1858,12 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
}
|
||||
|
||||
// If the code object is null, just return to the caller.
|
||||
Label skip;
|
||||
__ cmp(r0, Operand(Smi::zero()));
|
||||
__ b(ne, &skip);
|
||||
__ b(ne, &jump_to_optimized_code);
|
||||
__ Ret();
|
||||
|
||||
__ bind(&skip);
|
||||
__ bind(&jump_to_optimized_code);
|
||||
DCHECK_EQ(maybe_target_code, r0); // Already in the right spot.
|
||||
|
||||
if (source == OsrSourceTier::kInterpreter) {
|
||||
// Drop the handler frame that is be sitting on top of the actual
|
||||
@ -1895,23 +1892,19 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
|
||||
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = InterpreterOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
OnStackReplacement(masm, OsrSourceTier::kInterpreter,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = BaselineOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
|
||||
__ ldr(kContextRegister,
|
||||
MemOperand(fp, BaselineFrameConstants::kContextOffset));
|
||||
OnStackReplacement(masm, OsrSourceTier::kBaseline,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
// static
|
||||
@ -3703,11 +3696,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
||||
__ Pop(kInterpreterAccumulatorRegister);
|
||||
|
||||
if (is_osr) {
|
||||
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
|
||||
// disarm Sparkplug here.
|
||||
UseScratchRegisterScope temps(masm);
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
|
||||
temps.Acquire());
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, temps.Acquire());
|
||||
Generate_OSREntry(masm, code_obj,
|
||||
Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
} else {
|
||||
|
@ -1078,23 +1078,8 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
|
||||
// Check if the optimized code is marked for deopt. If it is, call the
|
||||
// runtime to clear it.
|
||||
__ AssertCodeT(optimized_code_entry);
|
||||
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
|
||||
__ Ldr(scratch.W(),
|
||||
FieldMemOperand(optimized_code_entry,
|
||||
CodeDataContainer::kKindSpecificFlagsOffset));
|
||||
__ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
|
||||
&heal_optimized_code_slot);
|
||||
|
||||
} else {
|
||||
__ LoadTaggedPointerField(
|
||||
scratch,
|
||||
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
|
||||
__ Ldr(
|
||||
scratch.W(),
|
||||
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
|
||||
__ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
|
||||
&heal_optimized_code_slot);
|
||||
}
|
||||
__ JumpIfCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch,
|
||||
&heal_optimized_code_slot);
|
||||
|
||||
// Optimized code is good, get it into the closure and link the closure into
|
||||
// the optimized functions list, then tail call the optimized code.
|
||||
@ -1250,14 +1235,21 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
|
||||
|
||||
namespace {
|
||||
|
||||
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
|
||||
Register bytecode_array) {
|
||||
// Reset the bytecode age and OSR state (optimized to a single write).
|
||||
static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
|
||||
void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) {
|
||||
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
|
||||
__ Str(wzr,
|
||||
FieldMemOperand(bytecode_array,
|
||||
BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
|
||||
__ Strh(wzr,
|
||||
FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset));
|
||||
}
|
||||
|
||||
void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
|
||||
Register feedback_vector, Register scratch) {
|
||||
DCHECK(!AreAliased(feedback_vector, scratch));
|
||||
__ Ldrb(scratch,
|
||||
FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
|
||||
__ And(scratch, scratch,
|
||||
Operand(FeedbackVector::MaybeHasOptimizedOsrCodeBit::kMask));
|
||||
__ Strb(scratch,
|
||||
FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
@ -1290,6 +1282,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
LoadTieringStateAndJumpIfNeedsProcessing(
|
||||
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
|
||||
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.AcquireW());
|
||||
}
|
||||
|
||||
// Increment invocation count for the function.
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
@ -1324,7 +1321,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
// the frame, so load it into a register.
|
||||
Register bytecode_array = descriptor.GetRegisterParameter(
|
||||
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
|
||||
ResetBytecodeAgeAndOsrState(masm, bytecode_array);
|
||||
ResetBytecodeAge(masm, bytecode_array);
|
||||
__ Push(argc, bytecode_array);
|
||||
|
||||
// Baseline code frames store the feedback vector where interpreter would
|
||||
@ -1455,6 +1452,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
LoadTieringStateAndJumpIfNeedsProcessing(
|
||||
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
|
||||
|
||||
{
|
||||
UseScratchRegisterScope temps(masm);
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.AcquireW());
|
||||
}
|
||||
|
||||
Label not_optimized;
|
||||
__ bind(¬_optimized);
|
||||
|
||||
@ -1474,7 +1476,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
__ mov(fp, sp);
|
||||
__ Push(cp, closure);
|
||||
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
|
||||
|
||||
// Load the initial bytecode offset.
|
||||
__ Mov(kInterpreterBytecodeOffsetRegister,
|
||||
@ -2076,37 +2078,14 @@ enum class OsrSourceTier {
|
||||
};
|
||||
|
||||
void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
Register current_loop_depth,
|
||||
Register encoded_current_bytecode_offset,
|
||||
Register osr_urgency_and_install_target) {
|
||||
static constexpr Register scratch = x3;
|
||||
DCHECK(!AreAliased(scratch, current_loop_depth,
|
||||
encoded_current_bytecode_offset,
|
||||
osr_urgency_and_install_target));
|
||||
// OSR based on urgency, i.e. is the OSR urgency greater than the current
|
||||
// loop depth?
|
||||
Label try_osr;
|
||||
STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
|
||||
Register urgency = scratch;
|
||||
__ And(urgency, osr_urgency_and_install_target,
|
||||
BytecodeArray::OsrUrgencyBits::kMask);
|
||||
__ Cmp(urgency, current_loop_depth);
|
||||
__ B(hi, &try_osr);
|
||||
|
||||
// OSR based on the install target offset, i.e. does the current bytecode
|
||||
// offset match the install target offset?
|
||||
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
|
||||
Register install_target = osr_urgency_and_install_target;
|
||||
__ And(install_target, osr_urgency_and_install_target, Operand(kMask));
|
||||
__ Cmp(install_target, encoded_current_bytecode_offset);
|
||||
__ B(eq, &try_osr);
|
||||
|
||||
// Neither urgency nor the install target triggered, return to the caller.
|
||||
// Note: the return value must be nullptr or a valid Code object.
|
||||
__ Move(x0, xzr);
|
||||
__ Ret();
|
||||
|
||||
__ bind(&try_osr);
|
||||
Register maybe_target_code) {
|
||||
Label jump_to_optimized_code;
|
||||
{
|
||||
// If maybe_target_code is not null, no need to call into runtime. A
|
||||
// precondition here is: if maybe_target_code is a Code object, it must NOT
|
||||
// be marked_for_deoptimization (callers must ensure this).
|
||||
__ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
|
||||
}
|
||||
|
||||
ASM_CODE_COMMENT(masm);
|
||||
{
|
||||
@ -2115,11 +2094,10 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
}
|
||||
|
||||
// If the code object is null, just return to the caller.
|
||||
Label skip;
|
||||
__ CompareTaggedAndBranch(x0, Smi::zero(), ne, &skip);
|
||||
__ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
|
||||
__ Ret();
|
||||
|
||||
__ Bind(&skip);
|
||||
__ Bind(&jump_to_optimized_code);
|
||||
|
||||
if (source == OsrSourceTier::kInterpreter) {
|
||||
// Drop the handler frame that is be sitting on top of the actual
|
||||
@ -2153,23 +2131,19 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
|
||||
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = InterpreterOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
OnStackReplacement(masm, OsrSourceTier::kInterpreter,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = BaselineOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
|
||||
__ ldr(kContextRegister,
|
||||
MemOperand(fp, BaselineFrameConstants::kContextOffset));
|
||||
OnStackReplacement(masm, OsrSourceTier::kBaseline,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
// static
|
||||
@ -4227,9 +4201,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
||||
__ Pop(kInterpreterAccumulatorRegister, padreg);
|
||||
|
||||
if (is_osr) {
|
||||
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
|
||||
// disarm Sparkplug here.
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
|
||||
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
|
||||
} else {
|
||||
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
|
||||
|
@ -85,12 +85,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
|
||||
|
||||
// Check if the optimized code is marked for deopt. If it is, call the
|
||||
// runtime to clear it.
|
||||
TNode<CodeDataContainer> code_data_container =
|
||||
CodeDataContainerFromCodeT(optimized_code);
|
||||
TNode<Int32T> code_kind_specific_flags = LoadObjectField<Int32T>(
|
||||
code_data_container, CodeDataContainer::kKindSpecificFlagsOffset);
|
||||
GotoIf(IsSetWord32<Code::MarkedForDeoptimizationField>(
|
||||
code_kind_specific_flags),
|
||||
GotoIf(IsMarkedForDeoptimization(optimized_code),
|
||||
&heal_optimized_code_slot);
|
||||
|
||||
// Optimized code is good, get it into the closure and link the closure into
|
||||
|
@ -856,10 +856,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
|
||||
|
||||
// Check if the optimized code is marked for deopt. If it is, bailout to a
|
||||
// given label.
|
||||
__ mov(eax,
|
||||
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
|
||||
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
|
||||
Immediate(1 << Code::kMarkedForDeoptimizationBit));
|
||||
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, eax);
|
||||
__ j(not_zero, &heal_optimized_code_slot);
|
||||
|
||||
// Optimized code is good, get it into the closure and link the closure
|
||||
@ -1039,14 +1036,20 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
|
||||
|
||||
namespace {
|
||||
|
||||
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
|
||||
Register bytecode_array) {
|
||||
// Reset the bytecode age and OSR state (optimized to a single write).
|
||||
static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
|
||||
void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) {
|
||||
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
|
||||
__ mov(FieldOperand(bytecode_array,
|
||||
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
|
||||
Immediate(0));
|
||||
__ mov_w(FieldOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset),
|
||||
Immediate(0));
|
||||
}
|
||||
|
||||
void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
|
||||
Register feedback_vector, Register scratch) {
|
||||
__ mov_b(scratch,
|
||||
FieldOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
|
||||
__ and_(scratch,
|
||||
Immediate(FeedbackVector::MaybeHasOptimizedOsrCodeBit::kMask));
|
||||
__ mov_b(FieldOperand(feedback_vector, FeedbackVector::kOsrStateOffset),
|
||||
scratch);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
@ -1101,13 +1104,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
|
||||
&has_optimized_code_or_state);
|
||||
|
||||
Label not_optimized;
|
||||
__ bind(¬_optimized);
|
||||
|
||||
// Load the feedback vector and increment the invocation count.
|
||||
// Reload the feedback vector.
|
||||
// TODO(jgruber): Don't clobber it above.
|
||||
__ mov(feedback_vector,
|
||||
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
|
||||
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
|
||||
|
||||
{
|
||||
static constexpr Register scratch = eax;
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, scratch);
|
||||
}
|
||||
|
||||
// Increment the invocation count.
|
||||
__ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
|
||||
|
||||
// Open a frame scope to indicate that there is a frame on the stack. The
|
||||
@ -1139,7 +1147,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
|
||||
}
|
||||
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
|
||||
|
||||
// Push bytecode array.
|
||||
__ push(kInterpreterBytecodeArrayRegister);
|
||||
@ -1722,8 +1730,16 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
saved_feedback_vector,
|
||||
&has_optimized_code_or_state);
|
||||
|
||||
// Load the feedback vector and increment the invocation count.
|
||||
// Reload the feedback vector.
|
||||
__ movd(feedback_vector, saved_feedback_vector);
|
||||
|
||||
{
|
||||
DCHECK_EQ(arg_count, eax);
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, eax);
|
||||
__ movd(arg_count, saved_arg_count); // Restore eax.
|
||||
}
|
||||
|
||||
// Increment the invocation count.
|
||||
__ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
|
||||
|
||||
XMMRegister return_address = xmm4;
|
||||
@ -1751,7 +1767,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
// the frame, so load it into a register.
|
||||
Register bytecode_array = scratch;
|
||||
__ movd(bytecode_array, saved_bytecode_array);
|
||||
ResetBytecodeAgeAndOsrState(masm, bytecode_array);
|
||||
ResetBytecodeAge(masm, bytecode_array);
|
||||
__ Push(bytecode_array);
|
||||
|
||||
// Baseline code frames store the feedback vector where interpreter would
|
||||
@ -2808,37 +2824,15 @@ enum class OsrSourceTier {
|
||||
};
|
||||
|
||||
void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
Register current_loop_depth,
|
||||
Register encoded_current_bytecode_offset,
|
||||
Register osr_urgency_and_install_target) {
|
||||
static constexpr Register scratch = edi;
|
||||
DCHECK(!AreAliased(scratch, current_loop_depth,
|
||||
encoded_current_bytecode_offset,
|
||||
osr_urgency_and_install_target));
|
||||
// OSR based on urgency, i.e. is the OSR urgency greater than the current
|
||||
// loop depth?
|
||||
Label try_osr;
|
||||
STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
|
||||
Register urgency = scratch;
|
||||
__ Move(urgency, osr_urgency_and_install_target);
|
||||
__ and_(urgency, Immediate(BytecodeArray::OsrUrgencyBits::kMask));
|
||||
__ cmp(urgency, current_loop_depth);
|
||||
__ j(above, &try_osr, Label::kNear);
|
||||
|
||||
// OSR based on the install target offset, i.e. does the current bytecode
|
||||
// offset match the install target offset?
|
||||
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
|
||||
Register install_target = osr_urgency_and_install_target;
|
||||
__ and_(install_target, Immediate(kMask));
|
||||
__ cmp(install_target, encoded_current_bytecode_offset);
|
||||
__ j(equal, &try_osr, Label::kNear);
|
||||
|
||||
// Neither urgency nor the install target triggered, return to the caller.
|
||||
// Note: the return value must be nullptr or a valid Code object.
|
||||
__ Move(eax, Immediate(0));
|
||||
__ ret(0);
|
||||
|
||||
__ bind(&try_osr);
|
||||
Register maybe_target_code) {
|
||||
Label jump_to_optimized_code;
|
||||
{
|
||||
// If maybe_target_code is not null, no need to call into runtime. A
|
||||
// precondition here is: if maybe_target_code is a Code object, it must NOT
|
||||
// be marked_for_deoptimization (callers must ensure this).
|
||||
__ cmp(maybe_target_code, Immediate(0));
|
||||
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
|
||||
}
|
||||
|
||||
ASM_CODE_COMMENT(masm);
|
||||
{
|
||||
@ -2846,13 +2840,12 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
__ CallRuntime(Runtime::kCompileOptimizedOSR);
|
||||
}
|
||||
|
||||
Label skip;
|
||||
// If the code object is null, just return to the caller.
|
||||
__ cmp(eax, Immediate(0));
|
||||
__ j(not_equal, &skip, Label::kNear);
|
||||
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
|
||||
__ ret(0);
|
||||
|
||||
__ bind(&skip);
|
||||
__ bind(&jump_to_optimized_code);
|
||||
|
||||
if (source == OsrSourceTier::kInterpreter) {
|
||||
// Drop the handler frame that is be sitting on top of the actual
|
||||
@ -2880,23 +2873,19 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
|
||||
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = InterpreterOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
OnStackReplacement(masm, OsrSourceTier::kInterpreter,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = BaselineOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
|
||||
__ mov(kContextRegister,
|
||||
MemOperand(ebp, BaselineFrameConstants::kContextOffset));
|
||||
OnStackReplacement(masm, OsrSourceTier::kBaseline,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
#if V8_ENABLE_WEBASSEMBLY
|
||||
@ -4323,9 +4312,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
||||
__ pop(kInterpreterAccumulatorRegister);
|
||||
|
||||
if (is_osr) {
|
||||
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
|
||||
// disarm Sparkplug here.
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
|
||||
Generate_OSREntry(masm, code_obj);
|
||||
} else {
|
||||
__ jmp(code_obj);
|
||||
|
@ -992,18 +992,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
|
||||
// Check if the optimized code is marked for deopt. If it is, call the
|
||||
// runtime to clear it.
|
||||
__ AssertCodeT(optimized_code_entry);
|
||||
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
|
||||
__ testl(FieldOperand(optimized_code_entry,
|
||||
CodeDataContainer::kKindSpecificFlagsOffset),
|
||||
Immediate(1 << Code::kMarkedForDeoptimizationBit));
|
||||
} else {
|
||||
__ LoadTaggedPointerField(
|
||||
scratch1,
|
||||
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
|
||||
__ testl(
|
||||
FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
|
||||
Immediate(1 << Code::kMarkedForDeoptimizationBit));
|
||||
}
|
||||
__ TestCodeTIsMarkedForDeoptimization(optimized_code_entry, scratch1);
|
||||
__ j(not_zero, &heal_optimized_code_slot);
|
||||
|
||||
// Optimized code is good, get it into the closure and link the closure into
|
||||
@ -1146,16 +1135,22 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
|
||||
|
||||
namespace {
|
||||
|
||||
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
|
||||
Register bytecode_array) {
|
||||
// Reset the bytecode age and OSR state (optimized to a single write).
|
||||
static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
|
||||
void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) {
|
||||
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
|
||||
__ movl(FieldOperand(bytecode_array,
|
||||
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
|
||||
__ movw(FieldOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset),
|
||||
Immediate(0));
|
||||
}
|
||||
|
||||
void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
|
||||
Register feedback_vector, Register scratch) {
|
||||
__ movb(scratch,
|
||||
FieldOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
|
||||
__ andb(scratch,
|
||||
Immediate(FeedbackVector::MaybeHasOptimizedOsrCodeBit::kMask));
|
||||
__ movb(FieldOperand(feedback_vector, FeedbackVector::kOsrStateOffset),
|
||||
scratch);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// Generate code for entering a JS function with the interpreter.
|
||||
@ -1215,8 +1210,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
LoadTieringStateAndJumpIfNeedsProcessing(
|
||||
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
|
||||
|
||||
Label not_optimized;
|
||||
__ bind(¬_optimized);
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);
|
||||
|
||||
// Increment invocation count for the function.
|
||||
__ incl(
|
||||
@ -1233,7 +1227,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
||||
__ Push(kJavaScriptCallTargetRegister); // Callee's JS function.
|
||||
__ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
|
||||
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
|
||||
|
||||
// Load initial bytecode offset.
|
||||
__ Move(kInterpreterBytecodeOffsetRegister,
|
||||
@ -1710,6 +1704,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
LoadTieringStateAndJumpIfNeedsProcessing(
|
||||
masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
|
||||
|
||||
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);
|
||||
|
||||
// Increment invocation count for the function.
|
||||
__ incl(
|
||||
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
|
||||
@ -1739,7 +1735,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
|
||||
// onto the frame, so load it into a register.
|
||||
Register bytecode_array = descriptor.GetRegisterParameter(
|
||||
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
|
||||
ResetBytecodeAgeAndOsrState(masm, bytecode_array);
|
||||
ResetBytecodeAge(masm, bytecode_array);
|
||||
__ Push(bytecode_array);
|
||||
|
||||
// Baseline code frames store the feedback vector where interpreter would
|
||||
@ -2730,49 +2726,28 @@ enum class OsrSourceTier {
|
||||
};
|
||||
|
||||
void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
Register current_loop_depth,
|
||||
Register encoded_current_bytecode_offset,
|
||||
Register osr_urgency_and_install_target) {
|
||||
DCHECK(!AreAliased(kScratchRegister, current_loop_depth,
|
||||
encoded_current_bytecode_offset,
|
||||
osr_urgency_and_install_target));
|
||||
// OSR based on urgency, i.e. is the OSR urgency greater than the current
|
||||
// loop depth?
|
||||
Label try_osr;
|
||||
STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
|
||||
Register urgency = kScratchRegister;
|
||||
__ Move(urgency, osr_urgency_and_install_target);
|
||||
__ andq(urgency, Immediate(BytecodeArray::OsrUrgencyBits::kMask));
|
||||
__ cmpq(urgency, current_loop_depth);
|
||||
__ j(above, &try_osr, Label::kNear);
|
||||
|
||||
// OSR based on the install target offset, i.e. does the current bytecode
|
||||
// offset match the install target offset?
|
||||
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
|
||||
Register install_target = osr_urgency_and_install_target;
|
||||
__ andq(install_target, Immediate(kMask));
|
||||
__ cmpq(install_target, encoded_current_bytecode_offset);
|
||||
__ j(equal, &try_osr, Label::kNear);
|
||||
|
||||
// Neither urgency nor the install target triggered, return to the caller.
|
||||
// Note: the return value must be nullptr or a valid Code object.
|
||||
__ Move(rax, Immediate(0));
|
||||
__ ret(0);
|
||||
|
||||
__ bind(&try_osr);
|
||||
Register maybe_target_code) {
|
||||
Label jump_to_optimized_code;
|
||||
{
|
||||
// If maybe_target_code is not null, no need to call into runtime. A
|
||||
// precondition here is: if maybe_target_code is a Code object, it must NOT
|
||||
// be marked_for_deoptimization (callers must ensure this).
|
||||
__ testq(maybe_target_code, maybe_target_code);
|
||||
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
|
||||
}
|
||||
|
||||
{
|
||||
FrameScope scope(masm, StackFrame::INTERNAL);
|
||||
__ CallRuntime(Runtime::kCompileOptimizedOSR);
|
||||
}
|
||||
|
||||
Label jump_to_returned_code;
|
||||
// If the code object is null, just return to the caller.
|
||||
__ testq(rax, rax);
|
||||
__ j(not_equal, &jump_to_returned_code, Label::kNear);
|
||||
__ j(not_equal, &jump_to_optimized_code, Label::kNear);
|
||||
__ ret(0);
|
||||
|
||||
__ bind(&jump_to_returned_code);
|
||||
__ bind(&jump_to_optimized_code);
|
||||
DCHECK_EQ(maybe_target_code, rax); // Already in the right spot.
|
||||
|
||||
if (source == OsrSourceTier::kInterpreter) {
|
||||
// Drop the handler frame that is be sitting on top of the actual
|
||||
@ -2803,23 +2778,18 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
|
||||
|
||||
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = InterpreterOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
OnStackReplacement(masm, OsrSourceTier::kInterpreter,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
|
||||
using D = BaselineOnStackReplacementDescriptor;
|
||||
STATIC_ASSERT(D::kParameterCount == 3);
|
||||
|
||||
STATIC_ASSERT(D::kParameterCount == 1);
|
||||
__ movq(kContextRegister,
|
||||
MemOperand(rbp, BaselineFrameConstants::kContextOffset));
|
||||
OnStackReplacement(masm, OsrSourceTier::kBaseline,
|
||||
D::CurrentLoopDepthRegister(),
|
||||
D::EncodedCurrentBytecodeOffsetRegister(),
|
||||
D::OsrUrgencyAndInstallTargetRegister());
|
||||
D::MaybeTargetCodeRegister());
|
||||
}
|
||||
|
||||
#if V8_ENABLE_WEBASSEMBLY
|
||||
@ -5169,8 +5139,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
|
||||
__ popq(kInterpreterAccumulatorRegister);
|
||||
|
||||
if (is_osr) {
|
||||
// TODO(pthier): Separate Sparkplug and Turbofan OSR states.
|
||||
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
|
||||
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
|
||||
Generate_OSREntry(masm, code_obj);
|
||||
} else {
|
||||
__ jmp(code_obj);
|
||||
|
@@ -397,6 +397,19 @@ void TurboAssembler::Ret(int drop, Condition cond) {
  Ret(cond);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
                                                        Register scratch) {
  ldr(scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
  ldr(scratch,
      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
  tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
}

Operand MacroAssembler::ClearedValue() const {
  return Operand(
      static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}

void TurboAssembler::Call(Label* target) { bl(target); }

void TurboAssembler::Push(Handle<HeapObject> handle) {
@@ -872,6 +872,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
    DecodeField<Field>(reg, reg);
  }

  void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
  Operand ClearedValue() const;

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(Register expected_parameter_count,
@@ -2485,6 +2485,29 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
  Bind(&done);
}

void MacroAssembler::JumpIfCodeTIsMarkedForDeoptimization(
    Register codet, Register scratch, Label* if_marked_for_deoptimization) {
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    Ldr(scratch.W(),
        FieldMemOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset));
    Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
         if_marked_for_deoptimization);

  } else {
    LoadTaggedPointerField(
        scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
    Ldr(scratch.W(),
        FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
    Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
         if_marked_for_deoptimization);
  }
}

Operand MacroAssembler::ClearedValue() const {
  return Operand(
      static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}

Operand MacroAssembler::ReceiverOperand(Register arg_count) {
  return Operand(0);
}
@@ -1838,6 +1838,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
    DecodeField<Field>(reg, reg);
  }

  void JumpIfCodeTIsMarkedForDeoptimization(
      Register codet, Register scratch, Label* if_marked_for_deoptimization);
  Operand ClearedValue() const;

  Operand ReceiverOperand(const Register arg_count);

  // ---- SMI and Number Utilities ----
@@ -2202,6 +2202,11 @@ TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
      WordOr(BitcastTaggedToWord(value), IntPtrConstant(kWeakHeapObjectTag))));
}

TNode<MaybeObject> CodeStubAssembler::ClearedValue() {
  return ReinterpretCast<MaybeObject>(BitcastWordToTagged(
      IntPtrConstant(HeapObjectReference::ClearedValue(isolate()).ptr())));
}

template <>
TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<FixedArray> array) {
  return LoadAndUntagFixedArrayBaseLength(array);
@@ -10450,22 +10455,30 @@ TNode<HeapObject> CodeStubAssembler::LoadFeedbackCellValue(

TNode<HeapObject> CodeStubAssembler::LoadFeedbackVector(
    TNode<JSFunction> closure) {
  TVARIABLE(HeapObject, maybe_vector, LoadFeedbackCellValue(closure));
  Label done(this);
  TVARIABLE(HeapObject, maybe_vector);
  Label if_no_feedback_vector(this), out(this);

  maybe_vector = LoadFeedbackVector(closure, &if_no_feedback_vector);
  Goto(&out);

  BIND(&if_no_feedback_vector);
  // If the closure doesn't have a feedback vector allocated yet, return
  // undefined. FeedbackCell can contain Undefined / FixedArray (for lazy
  // undefined. The FeedbackCell can contain Undefined / FixedArray (for lazy
  // allocations) / FeedbackVector.
  GotoIf(IsFeedbackVector(maybe_vector.value()), &done);

  // In all other cases return Undefined.
  maybe_vector = UndefinedConstant();
  Goto(&done);
  Goto(&out);

  BIND(&done);
  BIND(&out);
  return maybe_vector.value();
}

TNode<FeedbackVector> CodeStubAssembler::LoadFeedbackVector(
    TNode<JSFunction> closure, Label* if_no_feedback_vector) {
  TNode<HeapObject> maybe_vector = LoadFeedbackCellValue(closure);
  GotoIfNot(IsFeedbackVector(maybe_vector), if_no_feedback_vector);
  return CAST(maybe_vector);
}

TNode<ClosureFeedbackCellArray> CodeStubAssembler::LoadClosureFeedbackArray(
    TNode<JSFunction> closure) {
  TVARIABLE(HeapObject, feedback_cell_array, LoadFeedbackCellValue(closure));
@@ -14778,6 +14791,12 @@ TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeT> code) {
#endif
}

TNode<BoolT> CodeStubAssembler::IsMarkedForDeoptimization(TNode<CodeT> codet) {
  return IsSetWord32<Code::MarkedForDeoptimizationField>(
      LoadObjectField<Int32T>(CodeDataContainerFromCodeT(codet),
                              CodeDataContainer::kKindSpecificFlagsOffset));
}

TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
    TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
    TNode<Context> context) {
@@ -872,6 +872,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  }

  TNode<RawPtrT> GetCodeEntry(TNode<CodeT> code);
  TNode<BoolT> IsMarkedForDeoptimization(TNode<CodeT> codet);

  // The following Call wrappers call an object according to the semantics that
  // one finds in the EcmaScript spec, operating on an Callable (e.g. a
@@ -1495,6 +1496,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                       TNode<Object> object);

  TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
  TNode<MaybeObject> ClearedValue();

  void FixedArrayBoundsCheck(TNode<FixedArrayBase> array, TNode<Smi> index,
                             int additional_offset);
@@ -3387,6 +3389,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // The returned object could be undefined if the closure does not have
  // a feedback vector associated with it.
  TNode<HeapObject> LoadFeedbackVector(TNode<JSFunction> closure);
  TNode<FeedbackVector> LoadFeedbackVector(TNode<JSFunction> closure,
                                           Label* if_no_feedback_vector);

  // Load the ClosureFeedbackCellArray that contains the feedback cells
  // used when creating closures from this function. This array could be
@@ -894,32 +894,32 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
  return true;
}

// A wrapper to access either the OSR optimized code cache (one per native
// context), or the optimized code cache slot on the feedback vector.
// A wrapper to access the optimized code cache slots on the feedback vector.
class OptimizedCodeCache : public AllStatic {
 public:
  static V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> Get(
      Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
      CodeKind code_kind) {
    if (!CodeKindIsStoredInOptimizedCodeCache(code_kind)) return {};
    if (!function->has_feedback_vector()) return {};

    DisallowGarbageCollection no_gc;
    SharedFunctionInfo shared = function->shared();
    RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);

    CodeT code;
    FeedbackVector feedback_vector = function->feedback_vector();
    if (IsOSR(osr_offset)) {
      // For OSR, check the OSR optimized code cache.
      code = function->native_context().osr_code_cache().TryGet(
          shared, osr_offset, isolate);
      Handle<BytecodeArray> bytecode(shared.GetBytecodeArray(isolate), isolate);
      interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt());
      DCHECK_EQ(it.current_bytecode(), interpreter::Bytecode::kJumpLoop);
      base::Optional<CodeT> maybe_code =
          feedback_vector.GetOptimizedOsrCode(isolate, it.GetSlotOperand(2));
      if (maybe_code.has_value()) code = maybe_code.value();
    } else {
      // Non-OSR code may be cached on the feedback vector.
      if (function->has_feedback_vector()) {
        FeedbackVector feedback_vector = function->feedback_vector();
        feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
            shared, "OptimizedCodeCache::Get");
        code = feedback_vector.optimized_code();
      }
      feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
          shared, "OptimizedCodeCache::Get");
      code = feedback_vector.optimized_code();
    }

    DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind);
@ -935,23 +935,24 @@ class OptimizedCodeCache : public AllStatic {
|
||||
return handle(code, isolate);
|
||||
}
|
||||
|
||||
static void Insert(OptimizedCompilationInfo* compilation_info) {
|
||||
static void Insert(Isolate* isolate,
|
||||
OptimizedCompilationInfo* compilation_info) {
|
||||
const CodeKind kind = compilation_info->code_kind();
|
||||
if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
|
||||
|
||||
// Cache optimized code.
|
||||
Handle<JSFunction> function = compilation_info->closure();
|
||||
Isolate* isolate = function->GetIsolate();
|
||||
Handle<CodeT> code = ToCodeT(compilation_info->code(), isolate);
|
||||
const BytecodeOffset osr_offset = compilation_info->osr_offset();
|
||||
FeedbackVector feedback_vector = function->feedback_vector();
|
||||
|
||||
if (IsOSR(osr_offset)) {
|
||||
DCHECK(CodeKindCanOSR(kind));
|
||||
DCHECK(!compilation_info->function_context_specializing());
|
||||
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
|
||||
Handle<NativeContext> native_context(function->native_context(), isolate);
|
||||
OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
|
||||
osr_offset);
|
||||
SharedFunctionInfo shared = function->shared();
|
||||
Handle<BytecodeArray> bytecode(shared.GetBytecodeArray(isolate), isolate);
|
||||
interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt());
|
||||
DCHECK_EQ(it.current_bytecode(), interpreter::Bytecode::kJumpLoop);
|
||||
feedback_vector.SetOptimizedOsrCode(it.GetSlotOperand(2), *code);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -960,13 +961,13 @@ class OptimizedCodeCache : public AllStatic {
|
||||
if (compilation_info->function_context_specializing()) {
|
||||
// Function context specialization folds-in the function context, so no
|
||||
// sharing can occur. Make sure the optimized code cache is cleared.
|
||||
if (function->feedback_vector().has_optimized_code()) {
|
||||
function->feedback_vector().ClearOptimizedCode();
|
||||
if (feedback_vector.has_optimized_code()) {
|
||||
feedback_vector.ClearOptimizedCode();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
function->feedback_vector().SetOptimizedCode(code);
|
||||
feedback_vector.SetOptimizedCode(code);
|
||||
}
|
||||
};
|
||||
|
||||
@ -1017,7 +1018,7 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate,
|
||||
// Success!
|
||||
job->RecordCompilationStats(ConcurrencyMode::kSynchronous, isolate);
|
||||
DCHECK(!isolate->has_pending_exception());
|
||||
OptimizedCodeCache::Insert(compilation_info);
|
||||
OptimizedCodeCache::Insert(isolate, compilation_info);
|
||||
job->RecordFunctionCompilation(LogEventListener::LAZY_COMPILE_TAG, isolate);
|
||||
return true;
|
||||
}
|
||||
@ -2168,10 +2169,6 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
|
||||
return false;
|
||||
}
|
||||
shared->set_baseline_code(ToCodeT(*code), kReleaseStore);
|
||||
|
||||
if (V8_LIKELY(FLAG_use_osr)) {
|
||||
shared->GetBytecodeArray(isolate).RequestOsrAtNextOpportunity();
|
||||
}
|
||||
}
|
||||
double time_taken_ms = time_taken.InMillisecondsF();
|
||||
|
||||
@ -3402,15 +3399,7 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
|
||||
|
||||
// -- Alright, decided to proceed. --
|
||||
|
||||
// Disarm all back edges, i.e. reset the OSR urgency and install target.
|
||||
//
|
||||
// Note that the bytecode array active on the stack might be different from
|
||||
// the one installed on the function (e.g. patched by debugger). This however
|
||||
// is fine because we guarantee the layout to be in sync, hence any
|
||||
// BytecodeOffset representing the entry point will be valid for any copy of
|
||||
// the bytecode.
|
||||
Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), isolate);
|
||||
bytecode->reset_osr_urgency_and_install_target();
|
||||
function->feedback_vector().reset_osr_urgency();
|
||||
|
||||
CompilerTracer::TraceOptimizeOSR(isolate, function, osr_offset, mode);
|
||||
MaybeHandle<CodeT> result = GetOrCompileOptimized(
|
||||
@ -3471,16 +3460,9 @@ bool Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
|
||||
isolate);
|
||||
if (V8_LIKELY(use_result)) {
|
||||
ResetTieringState(*function, osr_offset);
|
||||
OptimizedCodeCache::Insert(compilation_info);
|
||||
OptimizedCodeCache::Insert(isolate, compilation_info);
|
||||
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
|
||||
if (IsOSR(osr_offset)) {
|
||||
if (FLAG_trace_osr) {
|
||||
PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
|
||||
"[OSR - requesting install. function: %s, osr offset: %d]\n",
|
||||
function->DebugNameCStr().get(), osr_offset.ToInt());
|
||||
}
|
||||
shared->GetBytecodeArray(isolate).set_osr_install_target(osr_offset);
|
||||
} else {
|
||||
if (!IsOSR(osr_offset)) {
|
||||
function->set_code(*compilation_info->code(), kReleaseStore);
|
||||
}
|
||||
}
|
||||
|
@ -733,6 +733,18 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit, scratch);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
Register scratch) {
mov(scratch, FieldOperand(codet, Code::kCodeDataContainerOffset));
test(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
}

Immediate MacroAssembler::ClearedValue() const {
return Immediate(
static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}

void MacroAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
ASM_CODE_COMMENT(this);

@ -557,6 +557,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
and_(reg, Immediate(mask));
}

void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
Immediate ClearedValue() const;

// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object);
@ -363,19 +363,11 @@ constexpr auto BaselineOnStackReplacementDescriptor::registers() {

// static
constexpr Register
BaselineOnStackReplacementDescriptor::CurrentLoopDepthRegister() {
BaselineOnStackReplacementDescriptor::MaybeTargetCodeRegister() {
// Picking the first register on purpose because it's convenient that this
// register is the same as the platform's return-value register.
return registers()[0];
}
// static
constexpr Register
BaselineOnStackReplacementDescriptor::EncodedCurrentBytecodeOffsetRegister() {
return registers()[1];
}
// static
constexpr Register
BaselineOnStackReplacementDescriptor::OsrUrgencyAndInstallTargetRegister() {
return registers()[2];
}

// static
constexpr auto InterpreterOnStackReplacementDescriptor::registers() {
@ -385,21 +377,9 @@ constexpr auto InterpreterOnStackReplacementDescriptor::registers() {

// static
constexpr Register
InterpreterOnStackReplacementDescriptor::CurrentLoopDepthRegister() {
InterpreterOnStackReplacementDescriptor::MaybeTargetCodeRegister() {
using BaselineD = BaselineOnStackReplacementDescriptor;
return BaselineD::CurrentLoopDepthRegister();
}
// static
constexpr Register InterpreterOnStackReplacementDescriptor::
EncodedCurrentBytecodeOffsetRegister() {
using BaselineD = BaselineOnStackReplacementDescriptor;
return BaselineD::EncodedCurrentBytecodeOffsetRegister();
}
// static
constexpr Register
InterpreterOnStackReplacementDescriptor::OsrUrgencyAndInstallTargetRegister() {
using BaselineD = BaselineOnStackReplacementDescriptor;
return BaselineD::OsrUrgencyAndInstallTargetRegister();
return BaselineD::MaybeTargetCodeRegister();
}

// static
@ -1706,16 +1706,11 @@ class InterpreterOnStackReplacementDescriptor
: public StaticCallInterfaceDescriptor<
InterpreterOnStackReplacementDescriptor> {
public:
DEFINE_PARAMETERS(kCurrentLoopDepth, kEncodedCurrentBytecodeOffset,
kOsrUrgencyAndInstallTarget)
DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kCurrentLoopDepth
MachineType::Int32(), // kEncodedCurrentBytecodeOffset
MachineType::Int32()) // kOsrUrgencyAndInstallTarget
DEFINE_PARAMETERS(kMaybeTargetCode)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) // kMaybeTargetCode
DECLARE_DESCRIPTOR(InterpreterOnStackReplacementDescriptor)

static constexpr inline Register CurrentLoopDepthRegister();
static constexpr inline Register EncodedCurrentBytecodeOffsetRegister();
static constexpr inline Register OsrUrgencyAndInstallTargetRegister();
static constexpr inline Register MaybeTargetCodeRegister();

static constexpr inline auto registers();
};
@ -1724,16 +1719,11 @@ class BaselineOnStackReplacementDescriptor
: public StaticCallInterfaceDescriptor<
BaselineOnStackReplacementDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kCurrentLoopDepth, kEncodedCurrentBytecodeOffset,
kOsrUrgencyAndInstallTarget)
DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kCurrentLoopDepth
MachineType::Int32(), // kEncodedCurrentBytecodeOffset
MachineType::Int32()) // kOsrUrgencyAndInstallTarget
DEFINE_PARAMETERS_NO_CONTEXT(kMaybeTargetCode)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) // kMaybeTargetCode
DECLARE_DESCRIPTOR(BaselineOnStackReplacementDescriptor)

static constexpr inline Register CurrentLoopDepthRegister();
static constexpr inline Register EncodedCurrentBytecodeOffsetRegister();
static constexpr inline Register OsrUrgencyAndInstallTargetRegister();
static constexpr inline Register MaybeTargetCodeRegister();

static constexpr inline auto registers();
};
@ -2332,6 +2332,24 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
CompareRange(instance_type_out, lower_limit, higher_limit);
}

void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
Register scratch) {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
testl(FieldOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
} else {
LoadTaggedPointerField(scratch,
FieldOperand(codet, Code::kCodeDataContainerOffset));
testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
}
}

Immediate MacroAssembler::ClearedValue() const {
return Immediate(
static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
}

void TurboAssembler::AssertNotSmi(Register object) {
if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);

@ -812,6 +812,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
andq(reg, Immediate(mask));
}

void TestCodeTIsMarkedForDeoptimization(Register codet, Register scratch);
Immediate ClearedValue() const;

// Abort execution if argument is not a CodeT, enabled via --debug-code.
void AssertCodeT(Register object);
@ -376,8 +376,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
for (Code code : codes) {
isolate->heap()->InvalidateCodeDeoptimizationData(code);
}

native_context.osr_code_cache().EvictDeoptimizedCode(isolate);
}

void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
@ -392,7 +390,6 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
while (!context.IsUndefined(isolate)) {
NativeContext native_context = NativeContext::cast(context);
MarkAllCodeForContext(native_context);
OSROptimizedCodeCache::Clear(isolate, native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
}
@ -442,13 +439,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
function.shared(), "unlinking code marked for deopt");
DeoptimizeMarkedCodeForContext(function.native_context());
// TODO(mythria): Ideally EvictMarkCode should compact the cache without
// having to explicitly call this. We don't do this currently because
// compacting causes GC and DeoptimizeMarkedCodeForContext uses raw
// pointers. Update DeoptimizeMarkedCodeForContext to use handles and remove
// this call from here.
OSROptimizedCodeCache::Compact(
isolate, Handle<NativeContext>(function.native_context(), isolate));
}
}
@ -147,29 +147,6 @@ void TieringManager::Optimize(JSFunction function, OptimizationDecision d) {

namespace {

bool HaveCachedOSRCodeForCurrentBytecodeOffset(UnoptimizedFrame* frame,
BytecodeOffset* osr_offset_out) {
JSFunction function = frame->function();
const int current_offset = frame->GetBytecodeOffset();
OSROptimizedCodeCache cache = function.native_context().osr_code_cache();
interpreter::BytecodeArrayIterator iterator(
handle(frame->GetBytecodeArray(), frame->isolate()));
for (BytecodeOffset osr_offset : cache.OsrOffsetsFor(function.shared())) {
DCHECK(!osr_offset.IsNone());
iterator.SetOffset(osr_offset.ToInt());
if (base::IsInRange(current_offset, iterator.GetJumpTargetOffset(),
osr_offset.ToInt())) {
*osr_offset_out = osr_offset;
return true;
}
}
return false;
}

} // namespace

namespace {

bool TiersUpToMaglev(CodeKind code_kind) {
// TODO(v8:7700): Flip the UNLIKELY when appropriate.
return V8_UNLIKELY(FLAG_maglev) && CodeKindIsUnoptimizedJSFunction(code_kind);
@ -213,53 +190,35 @@ bool SmallEnoughForOSR(Isolate* isolate, JSFunction function) {

void TrySetOsrUrgency(Isolate* isolate, JSFunction function, int osr_urgency) {
SharedFunctionInfo shared = function.shared();
// Guaranteed since we've got a feedback vector.
DCHECK(shared.IsUserJavaScript());

if (V8_UNLIKELY(!FLAG_use_osr)) return;
if (V8_UNLIKELY(!shared.IsUserJavaScript())) return;
if (V8_UNLIKELY(shared.optimization_disabled())) return;

// We've passed all checks - bump the OSR urgency.

BytecodeArray bytecode = shared.GetBytecodeArray(isolate);
FeedbackVector fv = function.feedback_vector();
if (V8_UNLIKELY(FLAG_trace_osr)) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(),
"[OSR - setting osr urgency. function: %s, old urgency: %d, new "
"urgency: %d]\n",
function.DebugNameCStr().get(), bytecode.osr_urgency(), osr_urgency);
function.DebugNameCStr().get(), fv.osr_urgency(), osr_urgency);
}

DCHECK_GE(osr_urgency, bytecode.osr_urgency()); // Never lower urgency here.
bytecode.set_osr_urgency(osr_urgency);
DCHECK_GE(osr_urgency, fv.osr_urgency()); // Never lower urgency here.
fv.set_osr_urgency(osr_urgency);
}

void TryIncrementOsrUrgency(Isolate* isolate, JSFunction function) {
int old_urgency = function.shared().GetBytecodeArray(isolate).osr_urgency();
int new_urgency = std::min(old_urgency + 1, BytecodeArray::kMaxOsrUrgency);
int old_urgency = function.feedback_vector().osr_urgency();
int new_urgency = std::min(old_urgency + 1, FeedbackVector::kMaxOsrUrgency);
TrySetOsrUrgency(isolate, function, new_urgency);
}

void TryRequestOsrAtNextOpportunity(Isolate* isolate, JSFunction function) {
TrySetOsrUrgency(isolate, function, BytecodeArray::kMaxOsrUrgency);
}

void TrySetOsrInstallTarget(Isolate* isolate, JSFunction function,
BytecodeOffset osr_offset) {
DCHECK(!osr_offset.IsNone());
SharedFunctionInfo shared = function.shared();

if (V8_UNLIKELY(!FLAG_use_osr)) return;
if (V8_UNLIKELY(!shared.IsUserJavaScript())) return;
if (V8_UNLIKELY(shared.optimization_disabled())) return;

BytecodeArray bytecode = shared.GetBytecodeArray(isolate);
if (V8_UNLIKELY(FLAG_trace_osr)) {
PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
"[OSR - requesting install. function: %s, osr offset: %d]\n",
function.DebugNameCStr().get(), osr_offset.ToInt());
}

bytecode.set_osr_install_target(osr_offset);
TrySetOsrUrgency(isolate, function, FeedbackVector::kMaxOsrUrgency);
}

bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {
@ -303,13 +262,6 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
// Continue below and do a normal optimized compile as well.
}

// If we have matching cached OSR'd code, request OSR at the next opportunity.
BytecodeOffset osr_offset_for_cached_osr_code = BytecodeOffset::None();
if (HaveCachedOSRCodeForCurrentBytecodeOffset(
frame, &osr_offset_for_cached_osr_code)) {
TrySetOsrInstallTarget(isolate_, function, osr_offset_for_cached_osr_code);
}

const bool is_marked_for_any_optimization =
(static_cast<uint32_t>(tiering_state) & kNoneOrInProgressMask) != 0;
if (is_marked_for_any_optimization || function.HasAvailableOptimizedCode()) {
@ -247,7 +247,6 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
instance.set_parameter_count(parameter_count);
instance.set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
instance.reset_osr_urgency_and_install_target();
instance.set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance.set_constant_pool(*constant_pool);
instance.set_handler_table(read_only_roots().empty_byte_array(),

@ -1186,7 +1186,6 @@ Handle<NativeContext> Factory::NewNativeContext() {
context.set_math_random_index(Smi::zero());
context.set_serialized_objects(*empty_fixed_array());
context.set_microtask_queue(isolate(), nullptr);
context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate()));
context.set_retained_maps(*empty_weak_array_list());
return handle(context, isolate());
}
@ -2418,8 +2417,6 @@ Handle<BytecodeArray> Factory::CopyBytecodeArray(Handle<BytecodeArray> source) {
copy.set_handler_table(raw_source.handler_table());
copy.set_source_position_table(raw_source.source_position_table(kAcquireLoad),
kReleaseStore);
copy.set_osr_urgency_and_install_target(
raw_source.osr_urgency_and_install_target());
copy.set_bytecode_age(raw_source.bytecode_age());
raw_source.CopyBytecodesTo(copy);
return handle(copy, isolate());
@ -84,9 +84,9 @@ void LoopBuilder::JumpToHeader(int loop_depth, LoopBuilder* const parent_loop) {
// The loop must have closed form, i.e. all loop elements are within the
// loop, the loop header precedes the body and next elements in the loop.
int slot_index = feedback_vector_spec_->AddJumpLoopSlot().ToInt();
builder()->JumpLoop(&loop_header_,
std::min(loop_depth, BytecodeArray::kMaxOsrUrgency - 1),
source_position_, slot_index);
builder()->JumpLoop(
&loop_header_, std::min(loop_depth, FeedbackVector::kMaxOsrUrgency - 1),
source_position_, slot_index);
}
}
@ -683,9 +683,12 @@ InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
return SmiUntag(CAST(LoadConstantPoolEntryAtOperandIndex(operand_index)));
}

TNode<JSFunction> InterpreterAssembler::LoadFunctionClosure() {
return CAST(LoadRegister(Register::function_closure()));
}

TNode<HeapObject> InterpreterAssembler::LoadFeedbackVector() {
TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
return CodeStubAssembler::LoadFeedbackVector(function);
return CodeStubAssembler::LoadFeedbackVector(LoadFunctionClosure());
}

void InterpreterAssembler::CallPrologue() {
@ -1313,12 +1316,12 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
UpdateInterruptBudget(profiling_weight, true);
}

TNode<Int16T> InterpreterAssembler::LoadOsrUrgencyAndInstallTarget() {
// We're loading a 16-bit field, mask it.
return UncheckedCast<Int16T>(Word32And(
LoadObjectField<Int16T>(BytecodeArrayTaggedPointer(),
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
0xFFFF));
TNode<Int8T> InterpreterAssembler::LoadOsrState(
TNode<FeedbackVector> feedback_vector) {
// We're loading an 8-bit field, mask it.
return UncheckedCast<Int8T>(Word32And(
LoadObjectField<Int8T>(feedback_vector, FeedbackVector::kOsrStateOffset),
0xFF));
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
@ -1340,42 +1343,68 @@ void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
}

void InterpreterAssembler::OnStackReplacement(
TNode<Context> context, TNode<IntPtrT> relative_jump,
TNode<Int32T> loop_depth, TNode<Int16T> osr_urgency_and_install_target) {
Label interpreter(this), baseline(this);
TNode<Context> context, TNode<FeedbackVector> feedback_vector,
TNode<IntPtrT> relative_jump, TNode<Int32T> loop_depth,
TNode<IntPtrT> feedback_slot, TNode<Int8T> osr_state,
OnStackReplacementParams params) {
// Three cases may cause us to attempt OSR, in the following order:
//
// 1) Presence of cached OSR Turbofan code.
// 2) Presence of cached OSR Sparkplug code.
// 3) The OSR urgency exceeds the current loop depth - in that case, trigger
// a Turbofan OSR compilation.
TVARIABLE(Object, maybe_target_code, SmiConstant(0));
Label osr_to_turbofan(this), osr_to_sparkplug(this);

// Case 1).
{
TNode<JSFunction> function =
CAST(LoadRegister(Register::function_closure()));
TNode<HeapObject> shared_info = LoadJSFunctionSharedFunctionInfo(function);
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
Branch(InstanceTypeEqual(data_type, CODET_TYPE), &baseline, &interpreter);
Label next(this);
TNode<MaybeObject> maybe_cached_osr_code =
LoadFeedbackVectorSlot(feedback_vector, feedback_slot);
GotoIf(IsCleared(maybe_cached_osr_code), &next);
maybe_target_code = GetHeapObjectAssumeWeak(maybe_cached_osr_code);

// Is it marked_for_deoptimization? If yes, clear the slot.
GotoIfNot(IsMarkedForDeoptimization(CAST(maybe_target_code.value())),
&osr_to_turbofan);
StoreFeedbackVectorSlot(feedback_vector, Unsigned(feedback_slot),
ClearedValue(), UNSAFE_SKIP_WRITE_BARRIER);
maybe_target_code = SmiConstant(0);

Goto(&next);
BIND(&next);
}

BIND(&interpreter);
{
// Encode the current bytecode offset as
//
// BytecodeArray::OsrInstallTargetFor(
// BytecodeOffset{iterator().current_offset()})
// << BytecodeArray::OsrInstallTargetBits::kShift
static constexpr int kShift = BytecodeArray::OsrInstallTargetBits::kShift;
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
TNode<Word32T> encoded_bytecode_offset = Word32Or(
Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()), kFirstBytecodeOffset),
Int32Constant(1));
encoded_bytecode_offset = Word32And(
Word32Shl(UncheckedCast<Int32T>(encoded_bytecode_offset), kShift),
kMask);
// Case 2).
if (params == OnStackReplacementParams::kBaselineCodeIsCached) {
Goto(&osr_to_sparkplug);
} else {
DCHECK_EQ(params, OnStackReplacementParams::kDefault);
TNode<SharedFunctionInfo> sfi = LoadObjectField<SharedFunctionInfo>(
LoadFunctionClosure(), JSFunction::kSharedFunctionInfoOffset);
TNode<HeapObject> sfi_data = LoadObjectField<HeapObject>(
sfi, SharedFunctionInfo::kFunctionDataOffset);
GotoIf(InstanceTypeEqual(LoadInstanceType(sfi_data), CODET_TYPE),
&osr_to_sparkplug);

// Case 3).
{
static_assert(FeedbackVector::OsrUrgencyBits::kShift == 0);
TNode<Int32T> osr_urgency = Word32And(
osr_state, Int32Constant(FeedbackVector::OsrUrgencyBits::kMask));
GotoIf(Uint32LessThan(loop_depth, osr_urgency), &osr_to_turbofan);
JumpBackward(relative_jump);
}
}

BIND(&osr_to_turbofan);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
CallStub(callable, context, loop_depth, encoded_bytecode_offset,
osr_urgency_and_install_target);
CallStub(callable, context, maybe_target_code.value());
JumpBackward(relative_jump);
}

BIND(&baseline);
BIND(&osr_to_sparkplug);
{
Callable callable =
CodeFactory::InterpreterOnStackReplacement_ToBaseline(isolate());
@ -140,6 +140,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Load and untag constant at |index| in the constant pool.
TNode<IntPtrT> LoadAndUntagConstantPoolEntry(TNode<WordT> index);

TNode<JSFunction> LoadFunctionClosure();

// Load the FeedbackVector for the current function. The returned node could be
// undefined.
TNode<HeapObject> LoadFeedbackVector();
@ -234,8 +236,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();

// Returns the OSR urgency and install target from the bytecode header.
TNode<Int16T> LoadOsrUrgencyAndInstallTarget();
TNode<Int8T> LoadOsrState(TNode<FeedbackVector> feedback_vector);

// Dispatch to the bytecode.
void Dispatch();
@ -263,12 +264,17 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<FixedArrayBase> parameters_and_registers,
TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count);

// Attempts to OSR; note this may fail in some cases, e.g. on a mismatched
// install target, or if there's no compiled code object available yet
// (concurrent OSR).
void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump,
// Attempts to OSR.
enum OnStackReplacementParams {
kBaselineCodeIsCached,
kDefault,
};
void OnStackReplacement(TNode<Context> context,
TNode<FeedbackVector> feedback_vector,
TNode<IntPtrT> relative_jump,
TNode<Int32T> loop_depth,
TNode<Int16T> osr_urgency_and_install_target);
TNode<IntPtrT> feedback_slot, TNode<Int8T> osr_state,
OnStackReplacementParams params);

// The BytecodeOffset() is the offset from the ByteCodeArray pointer; to
// translate into runtime `BytecodeOffset` (defined in utils.h as the offset
@ -2169,26 +2169,52 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// performs a loop nesting check, a stack check, and potentially triggers OSR.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
TNode<Int32T> loop_depth = BytecodeOperandImm(1);
TNode<Int16T> osr_urgency_and_install_target =
LoadOsrUrgencyAndInstallTarget();
TNode<Context> context = GetContext();

// OSR requests can be triggered either through urgency (when > the current
// loop depth), or an explicit install target (= the lower bits of the
// targeted bytecode offset).
Label ok(this), maybe_osr(this);
Branch(Int32GreaterThanOrEqual(loop_depth, osr_urgency_and_install_target),
&ok, &maybe_osr);
Label ok(this);
TNode<FeedbackVector> feedback_vector =
CodeStubAssembler::LoadFeedbackVector(LoadFunctionClosure(), &ok);
TNode<Int8T> osr_state = LoadOsrState(feedback_vector);
TNode<Int32T> loop_depth = BytecodeOperandImm(1);

Label maybe_osr_because_baseline(this),
maybe_osr_because_osr_state(this, Label::kDeferred);
// The quick initial OSR check. If it passes, we proceed on to more expensive
// OSR logic.
static_assert(FeedbackVector::MaybeHasOptimizedOsrCodeBit::encode(true) >
FeedbackVector::kMaxOsrUrgency);
GotoIfNot(Uint32GreaterThanOrEqual(loop_depth, osr_state),
&maybe_osr_because_osr_state);

// Perhaps we've got cached baseline code?
TNode<SharedFunctionInfo> sfi = LoadObjectField<SharedFunctionInfo>(
LoadFunctionClosure(), JSFunction::kSharedFunctionInfoOffset);
TNode<HeapObject> sfi_data =
LoadObjectField<HeapObject>(sfi, SharedFunctionInfo::kFunctionDataOffset);
Branch(InstanceTypeEqual(LoadInstanceType(sfi_data), CODET_TYPE),
&maybe_osr_because_baseline, &ok);

BIND(&ok);
// The backward jump can trigger a budget interrupt, which can handle stack
// interrupts, so we don't need to explicitly handle them here.
JumpBackward(relative_jump);

BIND(&maybe_osr);
OnStackReplacement(context, relative_jump, loop_depth,
osr_urgency_and_install_target);
BIND(&maybe_osr_because_baseline);
{
TNode<Context> context = GetContext();
TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(2));
OnStackReplacement(context, feedback_vector, relative_jump, loop_depth,
slot_index, osr_state,
OnStackReplacementParams::kBaselineCodeIsCached);
}

BIND(&maybe_osr_because_osr_state);
{
TNode<Context> context = GetContext();
TNode<IntPtrT> slot_index = Signed(BytecodeOperandIdx(2));
OnStackReplacement(context, feedback_vector, relative_jump, loop_depth,
slot_index, osr_state,
OnStackReplacementParams::kDefault);
}
}

// SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
@ -63,7 +63,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/osr-optimized-code-cache-inl.h"
#include "src/objects/primitive-heap-object-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/property-array-inl.h"
@ -1187,50 +1187,12 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
}
}

int BytecodeArray::osr_urgency() const {
return OsrUrgencyBits::decode(osr_urgency_and_install_target());
}

void BytecodeArray::set_osr_urgency(int urgency) {
DCHECK(0 <= urgency && urgency <= BytecodeArray::kMaxOsrUrgency);
STATIC_ASSERT(BytecodeArray::kMaxOsrUrgency <= OsrUrgencyBits::kMax);
uint32_t value = osr_urgency_and_install_target();
set_osr_urgency_and_install_target(OsrUrgencyBits::update(value, urgency));
}

BytecodeArray::Age BytecodeArray::bytecode_age() const {
// Bytecode is aged by the concurrent marker.
static_assert(kBytecodeAgeSize == kUInt16Size);
return static_cast<Age>(RELAXED_READ_INT16_FIELD(*this, kBytecodeAgeOffset));
}

void BytecodeArray::reset_osr_urgency() { set_osr_urgency(0); }

void BytecodeArray::RequestOsrAtNextOpportunity() {
set_osr_urgency(kMaxOsrUrgency);
}

int BytecodeArray::osr_install_target() {
return OsrInstallTargetBits::decode(osr_urgency_and_install_target());
}

void BytecodeArray::set_osr_install_target(BytecodeOffset jump_loop_offset) {
DCHECK_LE(jump_loop_offset.ToInt(), length());
set_osr_urgency_and_install_target(OsrInstallTargetBits::update(
osr_urgency_and_install_target(), OsrInstallTargetFor(jump_loop_offset)));
}

void BytecodeArray::reset_osr_install_target() {
uint32_t value = osr_urgency_and_install_target();
set_osr_urgency_and_install_target(
OsrInstallTargetBits::update(value, kNoOsrInstallTarget));
}

void BytecodeArray::reset_osr_urgency_and_install_target() {
set_osr_urgency_and_install_target(OsrUrgencyBits::encode(0) |
OsrInstallTargetBits::encode(0));
}

void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
DCHECK_GE(age, kFirstBytecodeAge);
DCHECK_LE(age, kLastBytecodeAge);

@ -610,7 +610,6 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Parameter count " << parameter_count() << "\n";
os << "Register count " << register_count() << "\n";
os << "Frame size " << frame_size() << "\n";
os << "OSR urgency: " << osr_urgency() << "\n";
os << "Bytecode age: " << bytecode_age() << "\n";

Address base_address = GetFirstBytecodeAddress();
@ -952,8 +952,6 @@ DEFINE_OPERATORS_FOR_FLAGS(DependentCode::DependencyGroups)
class BytecodeArray
: public TorqueGeneratedBytecodeArray<BytecodeArray, FixedArrayBase> {
public:
DEFINE_TORQUE_GENERATED_OSRURGENCY_AND_INSTALL_TARGET()

enum Age {
kNoAgeBytecodeAge = 0,
kQuadragenarianBytecodeAge,
@ -992,48 +990,10 @@ class BytecodeArray
inline void set_incoming_new_target_or_generator_register(
interpreter::Register incoming_new_target_or_generator_register);

// The [osr_urgency] controls when OSR is attempted, and is incremented as
// the function becomes hotter. When the current loop depth is less than the
// osr_urgency, JumpLoop calls into runtime to attempt OSR optimization.
static constexpr int kMaxOsrUrgency = 6;
STATIC_ASSERT(kMaxOsrUrgency <= OsrUrgencyBits::kMax);
inline int osr_urgency() const;
inline void set_osr_urgency(int urgency);
inline void reset_osr_urgency();
inline void RequestOsrAtNextOpportunity();

// The [osr_install_target] is used upon finishing concurrent OSR
// compilation; instead of bumping the osr_urgency (which would target all
// JumpLoops of appropriate loop_depth), we target a specific JumpLoop at the
// given bytecode offset.
static constexpr int kNoOsrInstallTarget = 0;
static constexpr int OsrInstallTargetFor(BytecodeOffset offset) {
// Any set `osr_install_target` must be non-zero since zero is the 'unset'
// value and is ignored by generated code. For branchless code (both here
// and in generated code), we simply OR in a 1.
STATIC_ASSERT(kNoOsrInstallTarget == 0);
return (offset.ToInt() | 1) &
(OsrInstallTargetBits::kMask >> OsrInstallTargetBits::kShift);
}

inline int osr_install_target();
inline void set_osr_install_target(BytecodeOffset jump_loop_offset);
inline void reset_osr_install_target();

inline void reset_osr_urgency_and_install_target();

static constexpr int kBytecodeAgeSize = kUInt16Size;
static_assert(kBytecodeAgeOffset + kBytecodeAgeSize - 1 ==
kBytecodeAgeOffsetEnd);

// InterpreterEntryTrampoline and other builtins expect these fields to be
// next to each other and fill 32 bits in total, since they write a 32-bit
// value to reset them.
static constexpr bool kOsrStateAndBytecodeAgeAreContiguous32Bits =
kBytecodeAgeOffset == kOsrUrgencyAndInstallTargetOffset + kUInt16Size &&
kBytecodeAgeSize == kUInt16Size;
static_assert(kOsrStateAndBytecodeAgeAreContiguous32Bits);

inline Age bytecode_age() const;
inline void set_bytecode_age(Age age);
@ -4,14 +4,6 @@

type DependentCode extends WeakFixedArray;

bitfield struct OSRUrgencyAndInstallTarget extends uint16 {
// The layout is chosen s.t. urgency and the install target offset can be
// loaded with a single 16-bit load (i.e. no masking required).
osr_urgency: uint32: 3 bit;
// The 13 LSB of the install target bytecode offset.
osr_install_target: uint32: 13 bit;
}

extern class BytecodeArray extends FixedArrayBase {
// TODO(v8:8983): bytecode array object sizes vary based on their contents.
constant_pool: FixedArray;
@ -30,8 +22,8 @@ extern class BytecodeArray extends FixedArrayBase {
frame_size: int32;
parameter_size: int32;
incoming_new_target_or_generator_register: int32;
osr_urgency_and_install_target: OSRUrgencyAndInstallTarget;
bytecode_age: uint16; // Only 3 bits used.
// TODO(jgruber): Only 3 bits are in use; if bits are needed, steal here.
bytecode_age: uint16;
}

extern class CodeDataContainer extends HeapObject;
@ -15,7 +15,6 @@
#include "src/objects/map-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/osr-optimized-code-cache-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info.h"

@ -9,7 +9,6 @@
#include "src/objects/fixed-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/osr-optimized-code-cache.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

@ -368,8 +367,7 @@ enum ContextLookupFlags {
V(WEAKMAP_DELETE_INDEX, JSFunction, weakmap_delete) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
V(WRAPPED_FUNCTION_MAP_INDEX, Map, wrapped_function_map) \
V(RETAINED_MAPS, Object, retained_maps) \
V(OSR_CODE_CACHE_INDEX, OSROptimizedCodeCache, osr_code_cache)
V(RETAINED_MAPS, Object, retained_maps)

#include "torque-generated/src/objects/contexts-tq.inc"
@ -1,25 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_
#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_

#include "src/objects/osr-optimized-code-cache.h"

#include "src/objects/fixed-array-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

OBJECT_CONSTRUCTORS_IMPL(OSROptimizedCodeCache, WeakFixedArray)
CAST_ACCESSOR(OSROptimizedCodeCache)

} // namespace internal
} // namespace v8

#include "src/objects/object-macros-undef.h"

#endif // V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_INL_H_
@ -1,303 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/osr-optimized-code-cache.h"

#include "src/execution/isolate-inl.h"
#include "src/objects/code.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"

namespace v8 {
namespace internal {

// static
Handle<OSROptimizedCodeCache> OSROptimizedCodeCache::Empty(Isolate* isolate) {
return Handle<OSROptimizedCodeCache>::cast(
isolate->factory()->empty_weak_fixed_array());
}

// static
void OSROptimizedCodeCache::Insert(Isolate* isolate,
Handle<NativeContext> native_context,
Handle<SharedFunctionInfo> shared,
Handle<CodeT> code,
BytecodeOffset osr_offset) {
DCHECK(!osr_offset.IsNone());
DCHECK(!isolate->serializer_enabled());
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));

Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
isolate);

if (shared->osr_code_cache_state() == kNotCached) {
DCHECK_EQ(osr_cache->FindEntry(*shared, osr_offset), -1);
} else if (osr_cache->FindEntry(*shared, osr_offset) != -1) {
return; // Already cached for a different JSFunction.
}

STATIC_ASSERT(kEntryLength == 3);
int entry = -1;
for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
osr_cache->Get(index + kCachedCodeOffset)->IsCleared()) {
entry = index;
break;
}
}

if (entry == -1) {
if (osr_cache->length() + kEntryLength <= kMaxLength) {
entry = GrowOSRCache(isolate, native_context, &osr_cache);
} else {
// We reached max capacity and cannot grow further. Reuse an existing
// entry.
// TODO(mythria): We could use better mechanisms (like lru) to replace
// existing entries. Though we don't expect this to be a common case, so
// for now choosing to replace the first entry.
osr_cache->ClearEntry(0, isolate);
entry = 0;
}
}

osr_cache->InitializeEntry(entry, *shared, *code, osr_offset);
}

void OSROptimizedCodeCache::Clear(Isolate* isolate,
NativeContext native_context) {
native_context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate));
}

void OSROptimizedCodeCache::Compact(Isolate* isolate,
Handle<NativeContext> native_context) {
Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
isolate);

// Re-adjust the cache so all the valid entries are on one side. This will
// enable us to compress the cache if needed.
int curr_valid_index = 0;
for (int curr_index = 0; curr_index < osr_cache->length();
curr_index += kEntryLength) {
if (osr_cache->Get(curr_index + kSharedOffset)->IsCleared() ||
osr_cache->Get(curr_index + kCachedCodeOffset)->IsCleared()) {
continue;
}
if (curr_valid_index != curr_index) {
osr_cache->MoveEntry(curr_index, curr_valid_index, isolate);
}
curr_valid_index += kEntryLength;
}

if (!NeedsTrimming(curr_valid_index, osr_cache->length())) return;

Handle<OSROptimizedCodeCache> new_osr_cache =
Handle<OSROptimizedCodeCache>::cast(isolate->factory()->NewWeakFixedArray(
CapacityForLength(curr_valid_index), AllocationType::kOld));
DCHECK_LT(new_osr_cache->length(), osr_cache->length());
{
DisallowGarbageCollection no_gc;
new_osr_cache->CopyElements(isolate, 0, *osr_cache, 0,
new_osr_cache->length(),
new_osr_cache->GetWriteBarrierMode(no_gc));
}
native_context->set_osr_code_cache(*new_osr_cache);
}

CodeT OSROptimizedCodeCache::TryGet(SharedFunctionInfo shared,
BytecodeOffset osr_offset,
Isolate* isolate) {
DisallowGarbageCollection no_gc;
int index = FindEntry(shared, osr_offset);
if (index == -1) return {};

CodeT code = GetCodeFromEntry(index);
if (code.is_null()) {
ClearEntry(index, isolate);
return {};
}

DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
return code;
}

void OSROptimizedCodeCache::EvictDeoptimizedCode(Isolate* isolate) {
// This is called from DeoptimizeMarkedCodeForContext that uses raw pointers
// and hence the DisallowGarbageCollection scope here.
DisallowGarbageCollection no_gc;
for (int index = 0; index < length(); index += kEntryLength) {
MaybeObject code_entry = Get(index + kCachedCodeOffset);
HeapObject heap_object;
if (!code_entry->GetHeapObject(&heap_object)) continue;

CodeT code = CodeT::cast(heap_object);
DCHECK(code.is_optimized_code());
if (!code.marked_for_deoptimization()) continue;

ClearEntry(index, isolate);
}
}

std::vector<BytecodeOffset> OSROptimizedCodeCache::OsrOffsetsFor(
SharedFunctionInfo shared) {
DisallowGarbageCollection gc;

const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
if (state == kNotCached) return {};

std::vector<BytecodeOffset> offsets;
for (int index = 0; index < length(); index += kEntryLength) {
if (GetSFIFromEntry(index) != shared) continue;
offsets.emplace_back(GetBytecodeOffsetFromEntry(index));
if (state == kCachedOnce) return offsets;
}

return offsets;
}

base::Optional<BytecodeOffset> OSROptimizedCodeCache::FirstOsrOffsetFor(
SharedFunctionInfo shared) {
DisallowGarbageCollection gc;

const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
if (state == kNotCached) return {};

for (int index = 0; index < length(); index += kEntryLength) {
if (GetSFIFromEntry(index) != shared) continue;
return GetBytecodeOffsetFromEntry(index);
}

return {};
}

int OSROptimizedCodeCache::GrowOSRCache(
Isolate* isolate, Handle<NativeContext> native_context,
Handle<OSROptimizedCodeCache>* osr_cache) {
int old_length = (*osr_cache)->length();
int grow_by = CapacityForLength(old_length) - old_length;
DCHECK_GT(grow_by, kEntryLength);
*osr_cache = Handle<OSROptimizedCodeCache>::cast(
isolate->factory()->CopyWeakFixedArrayAndGrow(*osr_cache, grow_by));
for (int i = old_length; i < (*osr_cache)->length(); i++) {
(*osr_cache)->Set(i, HeapObjectReference::ClearedValue(isolate));
}
native_context->set_osr_code_cache(**osr_cache);

return old_length;
}

CodeT OSROptimizedCodeCache::GetCodeFromEntry(int index) {
DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
DCHECK_EQ(index % kEntryLength, 0);
HeapObject code_entry;
Get(index + OSRCodeCacheConstants::kCachedCodeOffset)
->GetHeapObject(&code_entry);
if (code_entry.is_null()) return CodeT();
return CodeT::cast(code_entry);
}

SharedFunctionInfo OSROptimizedCodeCache::GetSFIFromEntry(int index) {
DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
DCHECK_EQ(index % kEntryLength, 0);
HeapObject sfi_entry;
Get(index + OSRCodeCacheConstants::kSharedOffset)->GetHeapObject(&sfi_entry);
return sfi_entry.is_null() ? SharedFunctionInfo()
: SharedFunctionInfo::cast(sfi_entry);
}

BytecodeOffset OSROptimizedCodeCache::GetBytecodeOffsetFromEntry(int index) {
DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
DCHECK_EQ(index % kEntryLength, 0);
Smi osr_offset_entry;
Get(index + kOsrIdOffset)->ToSmi(&osr_offset_entry);
return BytecodeOffset(osr_offset_entry.value());
}

int OSROptimizedCodeCache::FindEntry(SharedFunctionInfo shared,
BytecodeOffset osr_offset) {
DisallowGarbageCollection no_gc;
DCHECK(!osr_offset.IsNone());
for (int index = 0; index < length(); index += kEntryLength) {
if (GetSFIFromEntry(index) != shared) continue;
if (GetBytecodeOffsetFromEntry(index) != osr_offset) continue;
return index;
}
return -1;
}

void OSROptimizedCodeCache::ClearEntry(int index, Isolate* isolate) {
SharedFunctionInfo shared = GetSFIFromEntry(index);
DCHECK_GT(shared.osr_code_cache_state(), kNotCached);
if (V8_LIKELY(shared.osr_code_cache_state() == kCachedOnce)) {
shared.set_osr_code_cache_state(kNotCached);
} else if (shared.osr_code_cache_state() == kCachedMultiple) {
int osr_code_cache_count = 0;
for (int index = 0; index < length(); index += kEntryLength) {
if (GetSFIFromEntry(index) == shared) {
osr_code_cache_count++;
}
}
if (osr_code_cache_count == 2) {
shared.set_osr_code_cache_state(kCachedOnce);
}
}
HeapObjectReference cleared_value =
HeapObjectReference::ClearedValue(isolate);
Set(index + OSRCodeCacheConstants::kSharedOffset, cleared_value);
Set(index + OSRCodeCacheConstants::kCachedCodeOffset, cleared_value);
Set(index + OSRCodeCacheConstants::kOsrIdOffset, cleared_value);
}

void OSROptimizedCodeCache::InitializeEntry(int entry,
SharedFunctionInfo shared,
CodeT code,
BytecodeOffset osr_offset) {
Set(entry + OSRCodeCacheConstants::kSharedOffset,
HeapObjectReference::Weak(shared));
HeapObjectReference weak_code_entry = HeapObjectReference::Weak(code);
Set(entry + OSRCodeCacheConstants::kCachedCodeOffset, weak_code_entry);
Set(entry + OSRCodeCacheConstants::kOsrIdOffset,
MaybeObject::FromSmi(Smi::FromInt(osr_offset.ToInt())));
if (V8_LIKELY(shared.osr_code_cache_state() == kNotCached)) {
shared.set_osr_code_cache_state(kCachedOnce);
} else if (shared.osr_code_cache_state() == kCachedOnce) {
shared.set_osr_code_cache_state(kCachedMultiple);
}
}

void OSROptimizedCodeCache::MoveEntry(int src, int dst, Isolate* isolate) {
Set(dst + OSRCodeCacheConstants::kSharedOffset,
Get(src + OSRCodeCacheConstants::kSharedOffset));
Set(dst + OSRCodeCacheConstants::kCachedCodeOffset,
Get(src + OSRCodeCacheConstants::kCachedCodeOffset));
Set(dst + OSRCodeCacheConstants::kOsrIdOffset, Get(src + kOsrIdOffset));
HeapObjectReference cleared_value =
HeapObjectReference::ClearedValue(isolate);
Set(src + OSRCodeCacheConstants::kSharedOffset, cleared_value);
Set(src + OSRCodeCacheConstants::kCachedCodeOffset, cleared_value);
Set(src + OSRCodeCacheConstants::kOsrIdOffset, cleared_value);
}

int OSROptimizedCodeCache::CapacityForLength(int curr_length) {
// TODO(mythria): This is a randomly chosen heuristic and is not based on any
// data. We may have to tune this later.
if (curr_length == 0) return kInitialLength;
if (curr_length * 2 > kMaxLength) return kMaxLength;
return curr_length * 2;
}

bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries,
int curr_length) {
return curr_length > kInitialLength && curr_length > num_valid_entries * 3;
}

MaybeObject OSROptimizedCodeCache::RawGetForTesting(int index) const {
return WeakFixedArray::Get(index);
}

void OSROptimizedCodeCache::RawSetForTesting(int index, MaybeObject value) {
WeakFixedArray::Set(index, value);
}

} // namespace internal
} // namespace v8
@ -1,118 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_
#define V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_

#include "src/objects/fixed-array.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

// This enum is a performance optimization for accessing the OSR code cache -
// we can skip cache iteration in many cases unless there are multiple entries
// for a particular SharedFunctionInfo.
enum OSRCodeCacheStateOfSFI : uint8_t {
  kNotCached,       // Likely state.
  kCachedOnce,      // Unlikely state, one entry.
  kCachedMultiple,  // Very unlikely state, multiple entries.
};

// TODO(jgruber): There are a few issues with the current implementation:
//
// - The cache is a flat list, thus any search operation is O(N). This resulted
//   in optimization attempts, see OSRCodeCacheStateOfSFI.
// - We always iterate up to `length` (== capacity).
// - We essentially reimplement WeakArrayList, i.e. growth and shrink logic.
// - On overflow, new entries always pick slot 0.
//
// There are a few alternatives:
//
// 1) we could reuse WeakArrayList logic (but then we'd still have to
//    implement custom compaction due to our entry tuple structure).
// 2) we could reuse CompilationCacheTable (but then we lose weakness and have
//    to deal with aging).
// 3) we could try to base on a weak HashTable variant (EphemeronHashTable?).
class V8_EXPORT OSROptimizedCodeCache : public WeakFixedArray {
 public:
  DECL_CAST(OSROptimizedCodeCache)

  static Handle<OSROptimizedCodeCache> Empty(Isolate* isolate);

  // Caches the optimized code |code| corresponding to the shared function
  // |shared| and bailout id |osr_offset| in the OSROptimized code cache.
  // If the OSR code cache wasn't created before it creates a code cache with
  // kOSRCodeCacheInitialLength entries.
  static void Insert(Isolate* isolate, Handle<NativeContext> context,
                     Handle<SharedFunctionInfo> shared, Handle<CodeT> code,
                     BytecodeOffset osr_offset);

  // Returns the code corresponding to the shared function |shared| and
  // BytecodeOffset |offset| if an entry exists in the cache. Returns an empty
  // object otherwise.
  CodeT TryGet(SharedFunctionInfo shared, BytecodeOffset osr_offset,
               Isolate* isolate);

  std::vector<BytecodeOffset> OsrOffsetsFor(SharedFunctionInfo shared);
  base::Optional<BytecodeOffset> FirstOsrOffsetFor(SharedFunctionInfo shared);

  // Remove all code objects marked for deoptimization from OSR code cache.
  void EvictDeoptimizedCode(Isolate* isolate);

  // Reduces the size of the OSR code cache if the number of valid entries are
  // less than the current capacity of the cache.
  static void Compact(Isolate* isolate, Handle<NativeContext> context);

  // Sets the OSR optimized code cache to an empty array.
  static void Clear(Isolate* isolate, NativeContext context);

  enum OSRCodeCacheConstants {
    kSharedOffset,
    kCachedCodeOffset,
    kOsrIdOffset,
    kEntryLength
  };

  static constexpr int kInitialLength = OSRCodeCacheConstants::kEntryLength * 4;
  static constexpr int kMaxLength = OSRCodeCacheConstants::kEntryLength * 1024;

  // For osr-code-cache-unittest.cc.
  MaybeObject RawGetForTesting(int index) const;
  void RawSetForTesting(int index, MaybeObject value);

 private:
  // Hide raw accessors to avoid terminology confusion.
  using WeakFixedArray::Get;
  using WeakFixedArray::Set;

  // Functions that implement heuristics on when to grow / shrink the cache.
  static int CapacityForLength(int curr_capacity);
  static bool NeedsTrimming(int num_valid_entries, int curr_capacity);
  static int GrowOSRCache(Isolate* isolate,
                          Handle<NativeContext> native_context,
                          Handle<OSROptimizedCodeCache>* osr_cache);

  // Helper functions to get individual items from an entry in the cache.
  CodeT GetCodeFromEntry(int index);
  SharedFunctionInfo GetSFIFromEntry(int index);
  BytecodeOffset GetBytecodeOffsetFromEntry(int index);

  inline int FindEntry(SharedFunctionInfo shared, BytecodeOffset osr_offset);
  inline void ClearEntry(int src, Isolate* isolate);
  inline void InitializeEntry(int entry, SharedFunctionInfo shared, CodeT code,
                              BytecodeOffset osr_offset);
  inline void MoveEntry(int src, int dst, Isolate* isolate);

  OBJECT_CONSTRUCTORS(OSROptimizedCodeCache, WeakFixedArray);
};

}  // namespace internal
}  // namespace v8

#include "src/objects/object-macros-undef.h"

#endif  // V8_OBJECTS_OSR_OPTIMIZED_CODE_CACHE_H_
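The TODO block in the removed header describes a flat entry-tuple layout, where each logical entry occupies kEntryLength consecutive slots ([shared, code, osr_id]) and every lookup scans the whole backing store. The following is a minimal standalone sketch of that layout and its O(N) lookup; it is illustrative only (plain integers stand in for the weak SharedFunctionInfo/CodeT references), not V8 code.

#include <cstdint>
#include <optional>
#include <vector>

class FlatOsrCacheSketch {
 public:
  // Each entry occupies kEntryLength consecutive slots, mirroring the
  // [kSharedOffset, kCachedCodeOffset, kOsrIdOffset] layout above.
  static constexpr int kEntryLength = 3;

  void Insert(uint64_t shared, uint64_t code, int32_t osr_id) {
    slots_.push_back(shared);
    slots_.push_back(code);
    slots_.push_back(static_cast<uint64_t>(osr_id));
  }

  // O(N) scan over the whole backing store - the main drawback called out in
  // the TODO above, and the reason OSRCodeCacheStateOfSFI existed.
  std::optional<uint64_t> TryGet(uint64_t shared, int32_t osr_id) const {
    for (size_t i = 0; i + kEntryLength <= slots_.size(); i += kEntryLength) {
      if (slots_[i] == shared &&
          slots_[i + 2] == static_cast<uint64_t>(osr_id)) {
        return slots_[i + 1];
      }
    }
    return std::nullopt;
  }

 private:
  std::vector<uint64_t> slots_;  // flat list, capacity grown in entry strides
};

The per-JumpLoop FeedbackVector slots introduced by this CL avoid that scan entirely: each loop checks its own dedicated cache slot instead of searching a shared flat list.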
@ -245,37 +245,14 @@ RUNTIME_FUNCTION(Runtime_CompileOptimizedOSR) {
  BytecodeOffset osr_offset = BytecodeOffset(frame->GetBytecodeOffset());
  DCHECK(!osr_offset.IsNone());

  ConcurrencyMode mode =
  const ConcurrencyMode mode =
      V8_LIKELY(isolate->concurrent_recompilation_enabled() &&
                FLAG_concurrent_osr)
          ? ConcurrencyMode::kConcurrent
          : ConcurrencyMode::kSynchronous;

  Handle<JSFunction> function(frame->function(), isolate);
  if (IsConcurrent(mode)) {
    // The synchronous fallback mechanism triggers if we've already got OSR'd
    // code for the current function but at a different OSR offset - that may
    // indicate we're having trouble hitting the correct JumpLoop for code
    // installation. In this case, fall back to synchronous OSR.
    base::Optional<BytecodeOffset> cached_osr_offset =
        function->native_context().osr_code_cache().FirstOsrOffsetFor(
            function->shared());
    if (cached_osr_offset.has_value() &&
        cached_osr_offset.value() != osr_offset) {
      if (V8_UNLIKELY(FLAG_trace_osr)) {
        CodeTracer::Scope scope(isolate->GetCodeTracer());
        PrintF(
            scope.file(),
            "[OSR - falling back to synchronous compilation due to mismatched "
            "cached entry. function: %s, requested: %d, cached: %d]\n",
            function->DebugNameCStr().get(), osr_offset.ToInt(),
            cached_osr_offset.value().ToInt());
      }
      mode = ConcurrencyMode::kSynchronous;
    }
  }

  Handle<CodeT> result;
  Handle<JSFunction> function(frame->function(), isolate);
  if (!Compiler::CompileOptimizedOSR(isolate, function, osr_offset, frame, mode)
          .ToHandle(&result)) {
    // An empty result can mean one of two things:
@ -321,25 +298,6 @@ RUNTIME_FUNCTION(Runtime_CompileOptimizedOSR) {
    function->reset_tiering_state();
  }

  // TODO(mythria): Once we have OSR code cache we may not need to mark
  // the function for non-concurrent compilation. We could arm the loops
  // early so the second execution uses the already compiled OSR code and
  // the optimization occurs concurrently off main thread.
  if (!function->HasAvailableOptimizedCode() &&
      function->feedback_vector().invocation_count() > 1) {
    // If we're not already optimized, set to optimize non-concurrently on the
    // next call, otherwise we'd run unoptimized once more and potentially
    // compile for OSR again.
    if (FLAG_trace_osr) {
      CodeTracer::Scope scope(isolate->GetCodeTracer());
      PrintF(scope.file(),
             "[OSR - forcing synchronous optimization on next entry. function: "
             "%s]\n",
             function->DebugNameCStr().get());
    }
    function->set_tiering_state(TieringState::kRequestTurbofan_Synchronous);
  }

  return *result;
}

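With the synchronous fallback and the forced "optimize on next entry" request gone, the concurrency mode in Runtime_CompileOptimizedOSR is chosen once, up front, and never downgraded afterwards. A minimal self-contained sketch of the remaining decision, with plain bools standing in for the isolate query and FLAG_concurrent_osr (illustrative only, not the actual V8 code):

#include <cassert>

enum class ConcurrencyMode { kConcurrent, kSynchronous };

// Hypothetical helper mirroring the retained selection logic above.
ConcurrencyMode ChooseOsrMode(bool concurrent_recompilation_enabled,
                              bool concurrent_osr_flag) {
  return (concurrent_recompilation_enabled && concurrent_osr_flag)
             ? ConcurrencyMode::kConcurrent
             : ConcurrencyMode::kSynchronous;
}

int main() {
  // With both toggles on, OSR compile jobs are spawned concurrently; execution
  // simply continues and enters the OSR code once it lands in the cache slot.
  assert(ChooseOsrMode(true, true) == ConcurrencyMode::kConcurrent);
  // Otherwise the compile happens synchronously within the runtime call.
  assert(ChooseOsrMode(true, false) == ConcurrencyMode::kSynchronous);
  return 0;
}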
@ -615,20 +615,9 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
    return ReadOnlyRoots(isolate).undefined_value();
  }

  // Ensure that the function is marked for non-concurrent optimization, so that
  // subsequent runs don't also optimize.
  if (FLAG_trace_osr) {
    CodeTracer::Scope scope(isolate->GetCodeTracer());
    PrintF(scope.file(), "[OSR - OptimizeOsr marking ");
    function->ShortPrint(scope.file());
    PrintF(scope.file(), " for non-concurrent optimization]\n");
  }
  IsCompiledScope is_compiled_scope(
      function->shared().is_compiled_scope(isolate));
  JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
  function->MarkForOptimization(isolate, CodeKind::TURBOFAN,
                                ConcurrencyMode::kSynchronous);

  isolate->tiering_manager()->RequestOsrAtNextOpportunity(*function);

  // If concurrent OSR is enabled, the testing workflow is a bit tricky. We
@ -557,10 +557,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
    return PostProcessNewJSReceiver(raw_map, Handle<JSReceiver>::cast(obj),
                                    JSReceiver::cast(raw_obj), instance_type,
                                    space);
  } else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
    // TODO(mythria): Remove these once we store the default values for these
    // fields in the serializer.
    BytecodeArray::cast(raw_obj).reset_osr_urgency_and_install_target();
  } else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
    DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
    Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
@ -2669,8 +2669,6 @@ TEST(CodeSerializerAfterExecute) {

  Handle<SharedFunctionInfo> sfi = v8::Utils::OpenHandle(*script);
  CHECK(sfi->HasBytecodeArray());
  BytecodeArray bytecode = sfi->GetBytecodeArray(i_isolate2);
  CHECK_EQ(bytecode.osr_urgency(), 0);

  {
    DisallowCompilation no_compile_expected(i_isolate2);
@ -49,8 +49,12 @@ function f1(should_recurse) {
  }
  assertTrue(HasBaselineCode(f1));
  gc();
  assertFalse(HasBaselineCode(f1));
  assertTrue(HasByteCode(f1));
  // TODO(jgruber, v8:12161): No longer true since we now always tier up to
  // available Sparkplug code as early as possible. By the time we reach this
  // assert, SP code is being executed and is thus alive.
  // assertFalse(HasBaselineCode(f1));
  // Also, the active tier is Sparkplug and not Ignition.
  // assertTrue(ActiveTierIsIgnition(f1));
  }
  return x.b + 10;
}
@ -35,7 +35,7 @@ if (isNeverOptimizeLiteMode()) {
}
assertFalse(isAlwaysOptimize());

function f() {
function f(disable_asserts) {
  do {
    do {
      for (var i = 0; i < 10; i++) {
@ -47,6 +47,7 @@ function f() {
      // feedback.
      var opt_status = %GetOptimizationStatus(f);
      assertTrue(
          disable_asserts ||
          (opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0 ||
          (opt_status & V8OptimizationStatus.kTopmostFrameIsTurboFanned) !== 0);
    } while (false);
@ -54,7 +55,8 @@ function f() {
}

%PrepareFunctionForOptimization(f);
f();
f(true);  // Gather feedback first.
f(false);

function g() {
  for (var i = 0; i < 1; i++) { }
@ -378,7 +378,6 @@ v8_source_set("unittests_sources") {
    "objects/concurrent-script-context-table-unittest.cc",
    "objects/concurrent-string-unittest.cc",
    "objects/object-unittest.cc",
    "objects/osr-optimized-code-cache-unittest.cc",
    "objects/swiss-hash-table-helpers-unittest.cc",
    "objects/value-serializer-unittest.cc",
    "objects/weakarraylist-unittest.cc",
@ -1,424 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <cmath>
#include <iostream>
#include <limits>

#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/objects/osr-optimized-code-cache.h"
#include "test/unittests/test-utils.h"

#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

namespace {

const char* code_template_string =
    "function f%d() { return 0; };"
    "%%PrepareFunctionForOptimization(f%d);"
    "f%d(); f%d();"
    "%%OptimizeFunctionOnNextCall(f%d);"
    "f%d(); f%d;";

void GetSource(base::ScopedVector<char>* source, int index) {
  base::SNPrintF(*source, code_template_string, index, index, index, index,
                 index, index, index);
}

const int kInitialLength = OSROptimizedCodeCache::kInitialLength;
const int kInitialEntries =
    kInitialLength / OSROptimizedCodeCache::kEntryLength;
const int kMaxLength = OSROptimizedCodeCache::kMaxLength;
const int kMaxEntries = kMaxLength / OSROptimizedCodeCache::kEntryLength;

}  // namespace

TEST_F(TestWithNativeContext, AddCodeToEmptyCache) {
  if (!i::FLAG_opt) return;

  i::FLAG_allow_natives_syntax = true;

  base::ScopedVector<char> source(1024);
  GetSource(&source, 0);
  Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
  Isolate* isolate = function->GetIsolate();
  Handle<NativeContext> native_context(function->native_context(), isolate);
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  Handle<CodeT> code(function->code(), isolate);
  BytecodeOffset bailout_id(1);
  OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                bailout_id);

  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);
  EXPECT_EQ(osr_cache->length(), kInitialLength);

  HeapObject sfi_entry;
  osr_cache->RawGetForTesting(OSROptimizedCodeCache::kSharedOffset)
      ->GetHeapObject(&sfi_entry);
  EXPECT_EQ(sfi_entry, *shared);
  HeapObject code_entry;
  osr_cache->RawGetForTesting(OSROptimizedCodeCache::kCachedCodeOffset)
      ->GetHeapObject(&code_entry);
  EXPECT_EQ(code_entry, *code);
  Smi osr_offset_entry;
  osr_cache->RawGetForTesting(OSROptimizedCodeCache::kOsrIdOffset)
      ->ToSmi(&osr_offset_entry);
  EXPECT_EQ(osr_offset_entry.value(), bailout_id.ToInt());
}

TEST_F(TestWithNativeContext, GrowCodeCache) {
  if (!i::FLAG_opt) return;

  i::FLAG_allow_natives_syntax = true;

  base::ScopedVector<char> source(1024);
  GetSource(&source, 0);
  Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
  Isolate* isolate = function->GetIsolate();
  Handle<NativeContext> native_context(function->native_context(), isolate);
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  Handle<CodeT> code(function->code(), isolate);

  int bailout_id = 0;
  for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                  BytecodeOffset(bailout_id));
  }
  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);
  EXPECT_EQ(osr_cache->length(), kInitialLength);

  OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                BytecodeOffset(bailout_id));
  osr_cache =
      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
  EXPECT_EQ(osr_cache->length(), kInitialLength * 2);

  int index = kInitialLength;
  HeapObject sfi_entry;
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
      ->GetHeapObject(&sfi_entry);
  EXPECT_EQ(sfi_entry, *shared);
  HeapObject code_entry;
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
      ->GetHeapObject(&code_entry);
  EXPECT_EQ(code_entry, *code);
  Smi osr_offset_entry;
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
      ->ToSmi(&osr_offset_entry);
  EXPECT_EQ(osr_offset_entry.value(), bailout_id);
}

TEST_F(TestWithNativeContext, FindCachedEntry) {
  if (!i::FLAG_opt) return;

  i::FLAG_allow_natives_syntax = true;

  base::ScopedVector<char> source(1024);
  GetSource(&source, 0);
  Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
  Isolate* isolate = function->GetIsolate();
  Handle<NativeContext> native_context(function->native_context(), isolate);
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  Handle<CodeT> code(function->code(), isolate);

  int bailout_id = 0;
  for (bailout_id = 0; bailout_id < kInitialEntries; bailout_id++) {
    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                  BytecodeOffset(bailout_id));
  }

  base::ScopedVector<char> source1(1024);
  GetSource(&source1, 1);
  Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
  Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
  Handle<CodeT> code1(function1->code(), isolate);
  OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
                                BytecodeOffset(bailout_id));

  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);
  EXPECT_EQ(osr_cache->TryGet(*shared, BytecodeOffset(0), isolate), *code);
  EXPECT_EQ(osr_cache->TryGet(*shared1, BytecodeOffset(bailout_id), isolate),
            *code1);

  RunJS("%DeoptimizeFunction(f1)");
  EXPECT_TRUE(osr_cache->TryGet(*shared1, BytecodeOffset(bailout_id), isolate)
                  .is_null());

  osr_cache->RawSetForTesting(OSROptimizedCodeCache::kCachedCodeOffset,
                              HeapObjectReference::ClearedValue(isolate));
  EXPECT_TRUE(osr_cache->TryGet(*shared, BytecodeOffset(0), isolate).is_null());
}

TEST_F(TestWithNativeContext, MaxCapacityCache) {
  if (!i::FLAG_opt) return;

  i::FLAG_allow_natives_syntax = true;

  base::ScopedVector<char> source(1024);
  GetSource(&source, 0);
  Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
  Isolate* isolate = function->GetIsolate();
  Handle<NativeContext> native_context(function->native_context(), isolate);
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  Handle<CodeT> code(function->code(), isolate);

  int bailout_id = 0;
  // Add max_capacity - 1 entries.
  for (bailout_id = 0; bailout_id < kMaxEntries - 1; bailout_id++) {
    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                  BytecodeOffset(bailout_id));
  }
  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);
  EXPECT_EQ(osr_cache->length(), kMaxLength);

  // Add an entry to reach max capacity.
  base::ScopedVector<char> source1(1024);
  GetSource(&source1, 1);
  Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
  Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
  Handle<CodeT> code1(function1->code(), isolate);
  OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
                                BytecodeOffset(bailout_id));
  osr_cache =
      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
  EXPECT_EQ(osr_cache->length(), kMaxLength);

  int index = (kMaxEntries - 1) * OSROptimizedCodeCache::kEntryLength;
  HeapObject object;
  Smi smi;
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *shared1);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *code1);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
      ->ToSmi(&smi);
  EXPECT_EQ(smi.value(), bailout_id);

  // Add an entry beyond max capacity.
  base::ScopedVector<char> source2(1024);
  GetSource(&source2, 2);
  Handle<JSFunction> function2 = RunJS<JSFunction>(source2.begin());
  Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
  Handle<CodeT> code2(function2->code(), isolate);
  bailout_id++;
  OSROptimizedCodeCache::Insert(isolate, native_context, shared2, code2,
                                BytecodeOffset(bailout_id));
  osr_cache =
      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
  EXPECT_EQ(osr_cache->length(), kMaxLength);

  index = 0;
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *shared2);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *code2);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
      ->ToSmi(&smi);
  EXPECT_EQ(smi.value(), bailout_id);
}

TEST_F(TestWithNativeContext, ReuseClearedEntry) {
  if (!i::FLAG_opt) return;

  i::FLAG_allow_natives_syntax = true;

  base::ScopedVector<char> source(1024);
  GetSource(&source, 0);
  Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
  Isolate* isolate = function->GetIsolate();
  Handle<NativeContext> native_context(function->native_context(), isolate);
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  Handle<CodeT> code(function->code(), isolate);

  int num_entries = kInitialEntries * 2;
  int expected_length = kInitialLength * 2;
  int bailout_id = 0;
  for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
    OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                  BytecodeOffset(bailout_id));
  }
  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);
  EXPECT_EQ(osr_cache->length(), expected_length);

  int clear_index1 = 0;
  int clear_index2 = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
  osr_cache->RawSetForTesting(
      clear_index1 + OSROptimizedCodeCache::kSharedOffset,
      HeapObjectReference::ClearedValue(isolate));
  osr_cache->RawSetForTesting(
      clear_index2 + OSROptimizedCodeCache::kCachedCodeOffset,
      HeapObjectReference::ClearedValue(isolate));

  base::ScopedVector<char> source1(1024);
  GetSource(&source1, 1);
  Handle<JSFunction> function1 = RunJS<JSFunction>(source1.begin());
  Handle<SharedFunctionInfo> shared1(function1->shared(), isolate);
  Handle<CodeT> code1(function1->code(), isolate);
  OSROptimizedCodeCache::Insert(isolate, native_context, shared1, code1,
                                BytecodeOffset(bailout_id));
  osr_cache =
      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
  EXPECT_EQ(osr_cache->length(), expected_length);

  int index = clear_index1;
  HeapObject object;
  Smi smi;
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *shared1);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *code1);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
      ->ToSmi(&smi);
  EXPECT_EQ(smi.value(), bailout_id);

  base::ScopedVector<char> source2(1024);
  GetSource(&source2, 2);
  Handle<JSFunction> function2 = RunJS<JSFunction>(source2.begin());
  Handle<SharedFunctionInfo> shared2(function2->shared(), isolate);
  Handle<CodeT> code2(function2->code(), isolate);
  bailout_id++;
  OSROptimizedCodeCache::Insert(isolate, native_context, shared2, code2,
                                BytecodeOffset(bailout_id));
  osr_cache =
      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
  EXPECT_EQ(osr_cache->length(), expected_length);

  index = clear_index2;
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *shared2);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
      ->GetHeapObject(&object);
  EXPECT_EQ(object, *code2);
  osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
      ->ToSmi(&smi);
  EXPECT_EQ(smi.value(), bailout_id);
}

TEST_F(TestWithNativeContext, EvictDeoptedEntriesNoCompact) {
  if (!i::FLAG_opt) return;

  i::FLAG_allow_natives_syntax = true;

  base::ScopedVector<char> source(1024);
  GetSource(&source, 0);
  Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
  Isolate* isolate = function->GetIsolate();
  Handle<NativeContext> native_context(function->native_context(), isolate);
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  Handle<CodeT> code(function->code(), isolate);

  base::ScopedVector<char> source1(1024);
  GetSource(&source1, 1);
  Handle<JSFunction> deopt_function = RunJS<JSFunction>(source1.begin());
  Handle<SharedFunctionInfo> deopt_shared(deopt_function->shared(), isolate);
  Handle<CodeT> deopt_code(deopt_function->code(), isolate);

  int num_entries = kInitialEntries * 2;
  int expected_length = kInitialLength * 2;
  int deopt_id1 = num_entries - 2;
  int deopt_id2 = 0;
  int bailout_id = 0;
  for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
    if (bailout_id == deopt_id1 || bailout_id == deopt_id2) {
      OSROptimizedCodeCache::Insert(isolate, native_context, deopt_shared,
                                    deopt_code, BytecodeOffset(bailout_id));
    } else {
      OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                    BytecodeOffset(bailout_id));
    }
  }
  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);
  EXPECT_EQ(osr_cache->length(), expected_length);

  RunJS("%DeoptimizeFunction(f1)");
  osr_cache =
      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
  EXPECT_EQ(osr_cache->length(), expected_length);

  int index = (num_entries - 2) * OSROptimizedCodeCache::kEntryLength;
  EXPECT_TRUE(
      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
          ->IsCleared());
  EXPECT_TRUE(
      osr_cache
          ->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
          ->IsCleared());
  EXPECT_TRUE(
      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
          ->IsCleared());

  index = (num_entries - 1) * OSROptimizedCodeCache::kEntryLength;
  EXPECT_TRUE(
      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kSharedOffset)
          ->IsCleared());
  EXPECT_TRUE(
      osr_cache
          ->RawGetForTesting(index + OSROptimizedCodeCache::kCachedCodeOffset)
          ->IsCleared());
  EXPECT_TRUE(
      osr_cache->RawGetForTesting(index + OSROptimizedCodeCache::kOsrIdOffset)
          ->IsCleared());
}

TEST_F(TestWithNativeContext, EvictDeoptedEntriesCompact) {
  if (!i::FLAG_opt) return;

  i::FLAG_allow_natives_syntax = true;

  base::ScopedVector<char> source(1024);
  GetSource(&source, 0);
  Handle<JSFunction> function = RunJS<JSFunction>(source.begin());
  Isolate* isolate = function->GetIsolate();
  Handle<NativeContext> native_context(function->native_context(), isolate);
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  Handle<CodeT> code(function->code(), isolate);

  base::ScopedVector<char> source1(1024);
  GetSource(&source1, 1);
  Handle<JSFunction> deopt_function = RunJS<JSFunction>(source1.begin());
  Handle<SharedFunctionInfo> deopt_shared(deopt_function->shared(), isolate);
  Handle<CodeT> deopt_code(deopt_function->code(), isolate);

  int num_entries = kInitialEntries + 1;
  int expected_length = kInitialLength * 2;
  int bailout_id = 0;
  for (bailout_id = 0; bailout_id < num_entries; bailout_id++) {
    if (bailout_id % 2 == 0) {
      OSROptimizedCodeCache::Insert(isolate, native_context, deopt_shared,
                                    deopt_code, BytecodeOffset(bailout_id));
    } else {
      OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
                                    BytecodeOffset(bailout_id));
    }
  }
  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);
  EXPECT_EQ(osr_cache->length(), expected_length);

  RunJS("%DeoptimizeFunction(f1)");
  osr_cache =
      Handle<OSROptimizedCodeCache>(native_context->osr_code_cache(), isolate);
  EXPECT_EQ(osr_cache->length(), kInitialLength);
}

}  // namespace internal
}  // namespace v8