[tiering] Centralize the optimization decision

OptimizationDecision bundles the optimization {reason, code kind,
concurrency mode}. We centralize it to avoid recalculating in Optimize
what ShouldOptimize has already determined.

Bug: v8:7700
Change-Id: Ifcd902e86f844ce045bcefd4ae72ac17b42acb3a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3500300
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79326}
Author: Jakob Gruber, 2022-03-02 11:11:41 +01:00 (committed by V8 LUCI CQ)
parent 5cdac4b8c4
commit 22bbb73dbf

4 changed files with 67 additions and 29 deletions
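Before the diffs, the pattern in a nutshell: compute the decision once, pass it by value, and let the consumer read its fields instead of re-deriving them. A minimal standalone sketch with hypothetical names (not V8 code):

// Decide once, then hand the full decision to the consumer.
#include <cstdint>

enum class Reason : uint8_t { kNone, kHot };

struct Decision {
  Reason reason;
  bool should_act() const { return reason != Reason::kNone; }
};

Decision Decide(int ticks) {
  return {ticks > 3 ? Reason::kHot : Reason::kNone};
}

void Act(Decision d) { /* reads d.reason directly; nothing recomputed */ }

int main() {
  Decision d = Decide(5);
  if (d.should_act()) Act(d);
}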

src/common/globals.h

@@ -1702,7 +1702,7 @@ inline std::ostream& operator<<(std::ostream& os,
 enum class BlockingBehavior { kBlock, kDontBlock };

-enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
+enum class ConcurrencyMode : uint8_t { kNotConcurrent, kConcurrent };

 inline const char* ToString(ConcurrencyMode mode) {
   switch (mode) {
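The only change here is pinning the enum's underlying type. A standalone sketch (hypothetical names) of why that matters:

#include <cstdint>

enum class DefaultSized { kA, kB };         // underlying type defaults to int
enum class ByteSized : uint8_t { kA, kB };  // underlying type is one byte

static_assert(sizeof(DefaultSized) == sizeof(int),
              "scoped enums default to int");
static_assert(sizeof(ByteSized) == 1,
              "an explicit uint8_t shrinks the enum to one byte");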

src/execution/tiering-manager.cc

@@ -86,12 +86,47 @@ void TraceRecompile(JSFunction function, OptimizationReason reason,

 }  // namespace

-void TieringManager::Optimize(JSFunction function, OptimizationReason reason,
-                              CodeKind code_kind) {
-  DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
-  TraceRecompile(function, reason, code_kind, isolate_);
-  function.MarkForOptimization(isolate_, CodeKind::TURBOFAN,
-                               ConcurrencyMode::kConcurrent);
+class OptimizationDecision {
+ public:
+  static constexpr OptimizationDecision TurbofanHotAndStable() {
+    return {OptimizationReason::kHotAndStable, CodeKind::TURBOFAN,
+            ConcurrencyMode::kConcurrent};
+  }
+  static constexpr OptimizationDecision TurbofanSmallFunction() {
+    return {OptimizationReason::kSmallFunction, CodeKind::TURBOFAN,
+            ConcurrencyMode::kConcurrent};
+  }
+  static constexpr OptimizationDecision DoNotOptimize() {
+    return {OptimizationReason::kDoNotOptimize,
+            // These values don't matter but we have to pass something.
+            CodeKind::TURBOFAN, ConcurrencyMode::kConcurrent};
+  }
+
+  constexpr bool should_optimize() const {
+    return optimization_reason != OptimizationReason::kDoNotOptimize;
+  }
+
+  OptimizationReason optimization_reason;
+  CodeKind code_kind;
+  ConcurrencyMode concurrency_mode;
+
+ private:
+  OptimizationDecision() = default;
+  constexpr OptimizationDecision(OptimizationReason optimization_reason,
+                                 CodeKind code_kind,
+                                 ConcurrencyMode concurrency_mode)
+      : optimization_reason(optimization_reason),
+        code_kind(code_kind),
+        concurrency_mode(concurrency_mode) {}
+};
+// Since we pass by value:
+STATIC_ASSERT(sizeof(OptimizationDecision) <= kInt32Size);
+
+void TieringManager::Optimize(JSFunction function, CodeKind code_kind,
+                              OptimizationDecision d) {
+  DCHECK(d.should_optimize());
+  TraceRecompile(function, d.optimization_reason, code_kind, isolate_);
+  function.MarkForOptimization(isolate_, d.code_kind, d.concurrency_mode);
 }

 void TieringManager::AttemptOnStackReplacement(UnoptimizedFrame* frame,
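The STATIC_ASSERT above is the payoff of the uint8_t underlying types: with all three fields one byte wide, the whole decision fits in a register and is cheap to pass by value. A standalone sketch with hypothetical stand-ins for the V8 types:

#include <cstdint>
#include <iostream>

enum class Reason : uint8_t { kDoNotOptimize, kHotAndStable, kSmallFunction };
enum class Kind : uint8_t { kBaseline, kTurbofan };
enum class Concurrency : uint8_t { kNotConcurrent, kConcurrent };

struct Decision {
  Reason reason;
  Kind kind;
  Concurrency concurrency;
};

// Mirrors the assert above: small enough to pass by value.
static_assert(sizeof(Decision) <= sizeof(int32_t), "fits in 32 bits");

int main() {
  std::cout << sizeof(Decision) << "\n";  // prints 3 on typical ABIs
}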
@@ -168,12 +203,8 @@ void TieringManager::MaybeOptimizeFrame(JSFunction function,
     }
   }

-  OptimizationReason reason = ShouldOptimize(
-      function, function.shared().GetBytecodeArray(isolate_), frame);
-
-  if (reason != OptimizationReason::kDoNotOptimize) {
-    Optimize(function, reason, code_kind);
-  }
+  OptimizationDecision d = ShouldOptimize(function, code_kind, frame);
+  if (d.should_optimize()) Optimize(function, code_kind, d);
 }

 bool TieringManager::MaybeOSR(JSFunction function, UnoptimizedFrame* frame) {
@@ -200,16 +231,21 @@ bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {

 }  // namespace

-OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
-                                                  BytecodeArray bytecode,
-                                                  JavaScriptFrame* frame) {
-  if (function.ActiveTierIsTurbofan()) {
-    return OptimizationReason::kDoNotOptimize;
+OptimizationDecision TieringManager::ShouldOptimize(JSFunction function,
+                                                    CodeKind code_kind,
+                                                    JavaScriptFrame* frame) {
+  DCHECK_EQ(code_kind, function.GetActiveTier().value());
+
+  if (code_kind == CodeKind::TURBOFAN) {
+    // Already in the top tier.
+    return OptimizationDecision::DoNotOptimize();
   }
+
   // If the function's SFI has a cached OSR code entry and execution enters
   // the loop range of that cache, raise the bytecode's OSR loop nesting
   // level so that the OSR condition (loop_depth < osr_level) holds at the
   // JumpLoop bytecode that forms the cache's entry; OSR then triggers and
   // hits the cache.
+  BytecodeArray bytecode = function.shared().GetBytecodeArray(isolate_);
   if (V8_UNLIKELY(function.shared().osr_code_cache_state() > kNotCached) &&
       frame->is_unoptimized()) {
     int current_offset =
@@ -227,7 +263,7 @@ OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
           current_offset >= jump_target_offset) {
         bytecode.set_osr_loop_nesting_level(iterator.GetImmediateOperand(1) +
                                             1);
-        return OptimizationReason::kHotAndStable;
+        return OptimizationDecision::TurbofanHotAndStable();
       }
     }
   }
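A standalone sketch (hypothetical names) of the trigger the code above arms: setting the stored nesting level to the loop's depth plus one makes that loop's JumpLoop satisfy the OSR condition from the comment:

#include <cstdint>

struct BytecodeLike {
  uint8_t osr_loop_nesting_level = 0;  // the "osr_level" from the comment
};

// Hypothetical stand-in for the check a JumpLoop handler performs.
bool JumpLoopTriggersOsr(const BytecodeLike& bc, int loop_depth) {
  return loop_depth < bc.osr_loop_nesting_level;
}

int main() {
  BytecodeLike bc;
  bool before = JumpLoopTriggersOsr(bc, 1);  // false: level is still 0
  bc.osr_loop_nesting_level = 1 + 1;         // loop depth + 1, as above
  bool after = JumpLoopTriggersOsr(bc, 1);   // true: 1 < 2
  return (!before && after) ? 0 : 1;
}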
@@ -236,12 +272,12 @@ OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
       FLAG_ticks_before_optimization +
       (bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
   if (ticks >= ticks_for_optimization) {
-    return OptimizationReason::kHotAndStable;
+    return OptimizationDecision::TurbofanHotAndStable();
   } else if (ShouldOptimizeAsSmallFunction(bytecode.length(),
                                            any_ic_changed_)) {
     // If no IC was patched since the last tick and this function is very
     // small, optimistically optimize it now.
-    return OptimizationReason::kSmallFunction;
+    return OptimizationDecision::TurbofanSmallFunction();
   } else if (FLAG_trace_opt_verbose) {
     PrintF("[not yet optimizing ");
     function.PrintName();
@@ -253,7 +289,7 @@ OptimizationReason TieringManager::ShouldOptimize(JSFunction function,
            bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
     }
   }

-  return OptimizationReason::kDoNotOptimize;
+  return OptimizationDecision::DoNotOptimize();
 }

 TieringManager::OnInterruptTickScope::OnInterruptTickScope(
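For reference, the hotness threshold in the hunk above scales with bytecode size: bigger functions need more interrupt ticks before they count as hot. A standalone sketch with made-up stand-ins for the two flags (the real defaults live behind FLAG_ticks_before_optimization and FLAG_bytecode_size_allowance_per_tick):

#include <iostream>

constexpr int kTicksBeforeOptimization = 3;      // stand-in flag value
constexpr int kBytecodeSizeAllowancePerTick = 1200;  // stand-in flag value

int TicksForOptimization(int bytecode_length) {
  return kTicksBeforeOptimization +
         bytecode_length / kBytecodeSizeAllowancePerTick;
}

int main() {
  std::cout << TicksForOptimization(100) << "\n";   // small function: 3
  std::cout << TicksForOptimization(6000) << "\n";  // larger function: 8
}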

src/execution/tiering-manager.h

@@ -17,7 +17,8 @@ class Isolate;
 class UnoptimizedFrame;
 class JavaScriptFrame;
 class JSFunction;
-enum class CodeKind;
+class OptimizationDecision;
+enum class CodeKind : uint8_t;
 enum class OptimizationReason : uint8_t;

 class TieringManager {
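Note that the forward declaration has to repeat the new underlying type: every declaration of an enum, opaque or defining, must agree on it. A minimal sketch (hypothetical names):

#include <cstdint>

enum class KindLike : uint8_t;             // opaque declaration
void Use(KindLike k);                      // already usable by value
enum class KindLike : uint8_t { kA, kB };  // definition must match uint8_t

void Use(KindLike) {}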
@@ -45,11 +46,10 @@ class TieringManager {
   // Potentially attempts OSR from the given frame and returns whether no
   // other optimization attempts should be made.
   bool MaybeOSR(JSFunction function, UnoptimizedFrame* frame);

-  OptimizationReason ShouldOptimize(JSFunction function,
-                                    BytecodeArray bytecode_array,
-                                    JavaScriptFrame* frame);
-  void Optimize(JSFunction function, OptimizationReason reason,
-                CodeKind code_kind);
+  OptimizationDecision ShouldOptimize(JSFunction function, CodeKind code_kind,
+                                      JavaScriptFrame* frame);
+  void Optimize(JSFunction function, CodeKind code_kind,
+                OptimizationDecision decision);
   void Baseline(JSFunction function, OptimizationReason reason);

   class V8_NODISCARD OnInterruptTickScope final {

src/objects/code-kind.h

@@ -31,7 +31,7 @@ namespace internal {
   V(MAGLEV)                 \
   V(TURBOFAN)

-enum class CodeKind {
+enum class CodeKind : uint8_t {
 #define DEFINE_CODE_KIND_ENUM(name) name,
   CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
 #undef DEFINE_CODE_KIND_ENUM
@@ -42,6 +42,8 @@ STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOFAN);
 #define V(...) +1
 static constexpr int kCodeKindCount = CODE_KIND_LIST(V);
 #undef V

+// Unlikely, but just to be safe:
+STATIC_ASSERT(kCodeKindCount <= std::numeric_limits<uint8_t>::max());

 const char* CodeKindToString(CodeKind kind);
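The kCodeKindCount definition works because each V(...) invocation expands to +1, so the list literally counts itself. A standalone sketch of the idiom with a hypothetical three-entry list:

#include <cstdint>
#include <limits>

#define MY_KIND_LIST(V) V(INTERPRETED) V(BASELINE) V(TURBOFAN)

enum class MyKind : uint8_t {
#define DEFINE_ENUM(name) name,
  MY_KIND_LIST(DEFINE_ENUM)
#undef DEFINE_ENUM
};

#define V(...) +1
static constexpr int kMyKindCount = MY_KIND_LIST(V);  // expands to +1 +1 +1
#undef V

static_assert(kMyKindCount == 3, "the list counts itself");
static_assert(kMyKindCount <= std::numeric_limits<uint8_t>::max(),
              "mirrors the guard above: the count fits the uint8_t enum");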