[baseline] Reduce jump targets behind --no-deopt-to-baseline flag.

Add a --deopt-to-baseline flag, on by default, which allows returning to
Sparkplug code when deoptimizing. When this flag is turned off, the
deoptimizer can no longer jump into baseline code, so we can omit marking
most bytecodes as valid jump targets, leaving just the OSR and
exception-handling entry points. This reduces baseline code size by ~18%
on Arm64.

Bug: v8:13082
Change-Id: I5b5a6679465807d7fe812cb977464167efffa7ab
Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_cfi_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3785006
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Cr-Commit-Position: refs/heads/main@{#82266}
parent: d9dcca6d2f
commit: f1d1b2f9db
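Why removing jump-target marks shrinks code: with forward-edge CFI enabled (for example Arm64's BTI), every position an indirect branch may land on must begin with a landing-pad instruction. If the deoptimizer may re-enter baseline code at any bytecode boundary, that is one extra instruction per bytecode; with --no-deopt-to-baseline only OSR loop headers and exception handlers need one. A minimal counting sketch of the trade-off (illustrative code, not V8's; NeedsLandingPad is a hypothetical helper):

#include <cstdio>

// Decide whether a bytecode position needs a CFI landing pad (e.g. a `bti`
// instruction on Arm64). With deopt-to-baseline enabled the deoptimizer may
// indirect-jump anywhere, so every position must be a valid target; without
// it, only OSR loop headers and exception handler entries remain.
bool NeedsLandingPad(bool deopt_to_baseline, bool is_osr_or_handler_entry) {
  return deopt_to_baseline || is_osr_or_handler_entry;
}

int main() {
  // A made-up stream of 100 bytecode positions, 3 of which are OSR/handler
  // entries: 100 pads vs. 3 pads is where the size saving comes from.
  int pads_on = 0, pads_off = 0;
  for (int pos = 0; pos < 100; ++pos) {
    bool special = (pos == 10 || pos == 40 || pos == 90);
    pads_on += NeedsLandingPad(true, special);
    pads_off += NeedsLandingPad(false, special);
  }
  std::printf("pads with deopt-to-baseline: %d, without: %d\n", pads_on,
              pads_off);
}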
--- a/src/baseline/baseline-compiler.cc
+++ b/src/baseline/baseline-compiler.cc
@@ -277,8 +277,9 @@ BaselineCompiler::BaselineCompiler(
       basm_(&masm_),
       iterator_(bytecode_),
       zone_(local_isolate->allocator(), ZONE_NAME),
-      labels_(zone_.NewArray<Label*>(bytecode_->length())) {
-  MemsetPointer(labels_, nullptr, bytecode_->length());
+      labels_(zone_.NewArray<BaselineLabelPointer>(bytecode_->length())) {
+  MemsetPointer(reinterpret_cast<Address*>(labels_), Address{0},
+                bytecode_->length());
 
   // Empirically determined expected size of the offset table at the 95th %ile,
   // based on the size of the bytecode, to be:
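The labels_ array now stores one-word BaselineLabelPointer values, which is why it can still be cleared wholesale by reinterpreting the storage as raw Address words. A hedged sketch of the invariant this relies on (PackedLabel is a stand-in type, not V8's):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Stand-in for BaselineLabelPointer: a pointer plus a payload bit packed
// into a single machine word.
struct PackedLabel {
  std::uintptr_t bits = 0;
};

// Zeroing the array as raw words (as the constructor does via MemsetPointer
// over Address) is only sound under these properties, which hold on the
// usual platforms:
static_assert(sizeof(PackedLabel) == sizeof(void*),
              "array must be reinterpretable as raw pointer-sized words");
static_assert(std::is_trivially_copyable<PackedLabel>::value,
              "memset must produce a valid object state");

void ZeroLabels(PackedLabel* labels, std::size_t n) {
  std::memset(labels, 0, n * sizeof(PackedLabel));
}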
@@ -299,6 +300,12 @@ BaselineCompiler::BaselineCompiler(
 void BaselineCompiler::GenerateCode() {
   {
     RCS_BASELINE_SCOPE(PreVisit);
+    // Mark exception handlers as valid indirect jump targets. This is required
+    // when CFI is enabled, to allow indirect jumps into baseline code.
+    HandlerTable table(*bytecode_);
+    for (int i = 0; i < table.NumberOfRangeEntries(); ++i) {
+      labels_[table.GetRangeHandler(i)].MarkAsIndirectJumpTarget();
+    }
     for (; !iterator_.done(); iterator_.Advance()) {
       PreVisitSingleBytecode();
     }
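The pre-pass consults the bytecode's handler table so that exception handler entry points keep their landing pads even under --no-deopt-to-baseline. A toy version of that marking step, with an assumed table layout rather than V8's HandlerTable API:

#include <vector>

// Toy model: each range entry maps a [start, end) bytecode range to the
// bytecode offset of its handler.
struct RangeEntry {
  int start, end, handler_offset;
};

// Pre-mark every handler entry point so the later per-bytecode pass knows it
// needs a CFI landing pad even when deopt-to-baseline is disabled.
void MarkHandlers(const std::vector<RangeEntry>& table,
                  std::vector<bool>& is_indirect_target) {
  for (const RangeEntry& entry : table) {
    is_indirect_target[entry.handler_offset] = true;
  }
}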
@@ -438,7 +445,8 @@ void BaselineCompiler::AddPosition() {
 void BaselineCompiler::PreVisitSingleBytecode() {
   switch (iterator().current_bytecode()) {
     case interpreter::Bytecode::kJumpLoop:
-      EnsureLabel(iterator().GetJumpTargetOffset());
+      EnsureLabel(iterator().GetJumpTargetOffset(),
+                  MarkAsIndirectJumpTarget::kYes);
       break;
 
     // TODO(leszeks): Update the max_call_args as part of the main bytecode
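JumpLoop back-edge targets are the other positions that stay indirect-jump-valid, because on-stack replacement (OSR) enters baseline code at the loop header. A toy pre-visit pass under assumed bytecode shapes (not V8's interpreter types):

#include <map>
#include <vector>

// Walk a bytecode stream and record which offsets need labels, marking
// loop back-edge targets as indirect jump targets since OSR can enter there.
enum class Op { kPlain, kJumpLoop };
struct Insn {
  Op op;
  int jump_target;  // only meaningful for kJumpLoop
};

std::map<int, bool> CollectLabels(const std::vector<Insn>& bytecode) {
  std::map<int, bool> labels;  // offset -> is indirect jump target
  for (const Insn& insn : bytecode) {
    if (insn.op == Op::kJumpLoop) {
      labels[insn.jump_target] = true;  // like MarkAsIndirectJumpTarget::kYes
    }
  }
  return labels;
}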
@@ -470,11 +478,13 @@ void BaselineCompiler::PreVisitSingleBytecode() {
 
 void BaselineCompiler::VisitSingleBytecode() {
   int offset = iterator().current_offset();
-  if (labels_[offset]) __ Bind(labels_[offset]);
-
-  // Mark position as valid jump target. This is required for the deoptimizer
-  // and exception handling, when CFI is enabled.
-  __ JumpTarget();
+  BaselineLabelPointer label = labels_[offset];
+  if (label.GetPointer()) __ Bind(label.GetPointer());
+  // Mark position as valid jump target unconditionally when the deoptimizer
+  // can jump to baseline code. This is required when CFI is enabled.
+  if (FLAG_deopt_to_baseline || label.IsIndirectJumpTarget()) {
+    __ JumpTarget();
+  }
 
 #ifdef V8_CODE_COMMENTS
   std::ostringstream str;
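This is the hot spot of the change: __ JumpTarget() emits the branch-target landing pad (a BTI instruction on Arm64 builds with CFI enabled, typically a no-op elsewhere), and making its emission conditional on FLAG_deopt_to_baseline or the per-label mark is presumably where the ~18% size saving quoted in the commit message comes from.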
@@ -1897,7 +1907,7 @@ void BaselineCompiler::VisitJumpLoop() {
   }
 
   __ Bind(&osr_not_armed);
-  Label* label = labels_[iterator().GetJumpTargetOffset()];
+  Label* label = labels_[iterator().GetJumpTargetOffset()].GetPointer();
   int weight = iterator().GetRelativeJumpTargetOffset() -
                iterator().current_bytecode_size_without_prefix();
   // We can pass in the same label twice since it's a back edge and thus already
--- a/src/baseline/baseline-compiler.h
+++ b/src/baseline/baseline-compiler.h
@@ -11,6 +11,7 @@
 #if ENABLE_SPARKPLUG
 
 #include "src/base/logging.h"
+#include "src/base/pointer-with-payload.h"
 #include "src/base/threaded-list.h"
 #include "src/base/vlq.h"
 #include "src/baseline/baseline-assembler.h"
@@ -171,14 +172,27 @@ class BaselineCompiler {
 
   int max_call_args_ = 0;
 
-  Label* EnsureLabel(int i) {
-    if (labels_[i] == nullptr) {
-      labels_[i] = zone_.New<Label>();
+  // Mark location as a jump target reachable via indirect branches, required
+  // for CFI.
+  enum class MarkAsIndirectJumpTarget { kNo, kYes };
+
+  struct BaselineLabelPointer : base::PointerWithPayload<Label, bool, 1> {
+    void MarkAsIndirectJumpTarget() { SetPayload(true); }
+    bool IsIndirectJumpTarget() const { return GetPayload(); }
+  };
+
+  Label* EnsureLabel(
+      int i, MarkAsIndirectJumpTarget mark = MarkAsIndirectJumpTarget::kNo) {
+    if (labels_[i].GetPointer() == nullptr) {
+      labels_[i].SetPointer(zone_.New<Label>());
     }
-    return labels_[i];
+    if (mark == MarkAsIndirectJumpTarget::kYes) {
+      labels_[i].MarkAsIndirectJumpTarget();
+    }
+    return labels_[i].GetPointer();
   }
 
-  Label** labels_;
+  BaselineLabelPointer* labels_;
 };
 
 }  // namespace baseline
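base::PointerWithPayload packs the boolean into the low alignment bit of the Label*, so the per-bytecode footprint of labels_ stays one word. A self-contained sketch of that low-bit tagging technique (illustrative only; V8's template takes more parameters and does stronger checking):

#include <cassert>
#include <cstdint>

template <typename T>
class PtrWithBit {
 public:
  void SetPointer(T* ptr) {
    auto bits = reinterpret_cast<std::uintptr_t>(ptr);
    // Zone-allocated objects are at least 2-byte aligned, so bit 0 is free.
    assert((bits & 1u) == 0);
    bits_ = bits | (bits_ & 1u);  // keep the existing payload bit
  }
  T* GetPointer() const {
    return reinterpret_cast<T*>(bits_ & ~std::uintptr_t{1});
  }
  void SetPayload(bool value) {
    bits_ = (bits_ & ~std::uintptr_t{1}) | static_cast<std::uintptr_t>(value);
  }
  bool GetPayload() const { return (bits_ & 1u) != 0; }

 private:
  std::uintptr_t bits_ = 0;  // pointer and flag share one machine word
};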
--- a/src/deoptimizer/deoptimizer.cc
+++ b/src/deoptimizer/deoptimizer.cc
@@ -947,11 +947,11 @@ void Deoptimizer::DoComputeOutputFrames() {
 namespace {
 
 // Get the dispatch builtin for unoptimized frames.
-Builtin DispatchBuiltinFor(bool is_baseline, bool advance_bc,
+Builtin DispatchBuiltinFor(bool deopt_to_baseline, bool advance_bc,
                            bool is_restart_frame) {
   if (is_restart_frame) return Builtin::kRestartFrameTrampoline;
 
-  if (is_baseline) {
+  if (deopt_to_baseline) {
     return advance_bc ? Builtin::kBaselineOrInterpreterEnterAtNextBytecode
                       : Builtin::kBaselineOrInterpreterEnterAtBytecode;
   } else {
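The rename makes the contract explicit: the caller now passes "may we re-enter baseline code", not merely "does baseline code exist". A standalone mirror of the branch structure, with stand-in Builtin ids and an assumed interpreter fallback (the else branch is not shown in the hunk above):

// Enum values are stand-ins kept only to show the branch structure.
enum class Builtin {
  kRestartFrameTrampoline,
  kBaselineOrInterpreterEnterAtNextBytecode,
  kBaselineOrInterpreterEnterAtBytecode,
  kInterpreterEnterAtNextBytecode,
  kInterpreterEnterAtBytecode,
};

Builtin DispatchBuiltinFor(bool deopt_to_baseline, bool advance_bc,
                           bool is_restart_frame) {
  if (is_restart_frame) return Builtin::kRestartFrameTrampoline;
  if (deopt_to_baseline) {
    return advance_bc ? Builtin::kBaselineOrInterpreterEnterAtNextBytecode
                      : Builtin::kBaselineOrInterpreterEnterAtBytecode;
  }
  // Otherwise resume in the interpreter; the elided else branch presumably
  // picks the interpreter entry builtins.
  return advance_bc ? Builtin::kInterpreterEnterAtNextBytecode
                    : Builtin::kInterpreterEnterAtBytecode;
}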
@@ -1017,14 +1017,15 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
   const bool advance_bc =
       (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
       !goto_catch_handler;
-  const bool is_baseline = shared.HasBaselineCode();
+  const bool deopt_to_baseline =
+      shared.HasBaselineCode() && FLAG_deopt_to_baseline;
   const bool restart_frame = goto_catch_handler && is_restart_frame();
   Code dispatch_builtin = FromCodeT(builtins->code(
-      DispatchBuiltinFor(is_baseline, advance_bc, restart_frame)));
+      DispatchBuiltinFor(deopt_to_baseline, advance_bc, restart_frame)));
 
   if (verbose_tracing_enabled()) {
     PrintF(trace_scope()->file(), " translating %s frame ",
-           is_baseline ? "baseline" : "interpreted");
+           deopt_to_baseline ? "baseline" : "interpreted");
     std::unique_ptr<char[]> name = shared.DebugNameCStr();
     PrintF(trace_scope()->file(), "%s", name.get());
     PrintF(trace_scope()->file(), " => bytecode_offset=%d, ",
--- a/src/flags/flag-definitions.h
+++ b/src/flags/flag-definitions.h
@@ -1635,6 +1635,8 @@ DEFINE_BOOL(always_turbofan, false, "always try to optimize functions")
 DEFINE_IMPLICATION(always_turbofan, turbofan)
 DEFINE_BOOL(always_osr, false, "always try to OSR functions")
 DEFINE_BOOL(prepare_always_turbofan, false, "prepare for turning on always opt")
+DEFINE_BOOL(deopt_to_baseline, ENABLE_SPARKPLUG,
+            "deoptimize to baseline code when available")
 
 DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
 #ifdef DEBUG
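Because the default is the ENABLE_SPARKPLUG build macro, builds without Sparkplug get the flag off by default. A stand-in illustration of a macro-driven flag default (not V8's DEFINE_BOOL machinery):

// The flag's default tracks the build configuration, so --deopt-to-baseline
// defaults to true only when Sparkplug is compiled in.
#ifndef ENABLE_SPARKPLUG
#define ENABLE_SPARKPLUG 1  // assumption for this sketch
#endif

struct Flags {
  bool deopt_to_baseline = static_cast<bool>(ENABLE_SPARKPLUG);
};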
--- /dev/null
+++ b/test/mjsunit/baseline/deopt-to-baseline.js
@@ -0,0 +1,32 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --sparkplug --no-always-sparkplug --turbofan
+// Flags: --no-deopt-to-baseline
+
+function isExecutingInterpreted(func) {
+  let opt_status = %GetOptimizationStatus(func);
+  return (opt_status & V8OptimizationStatus.kTopmostFrameIsInterpreted) !== 0;
+}
+
+function f(check = false) {
+  if (check) {
+    %DeoptimizeFunction(f);
+    assertTrue(isExecutingInterpreted(f));
+  }
+}
+
+f();
+%CompileBaseline(f);
+f();
+assertTrue(isBaseline(f));
+
+%PrepareFunctionForOptimization(f);
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+assertOptimized(f);
+
+f(true);
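The test pins down the new behaviour end to end: f is tiered up through baseline to optimized code, then f(true) triggers %DeoptimizeFunction from inside the active frame and, because --no-deopt-to-baseline is passed, asserts via the topmost-frame status bit that execution resumed in the interpreter even though baseline code is still attached to the function.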