[maglev] Initial Maglev commit

Maglev is a mid-tier optimising compiler designed mainly for compilation
speed that can still generate good code for straightforward JS.

This initial commit is an MVP for Maglev which can compile and run some
very simple code, and sets up a framework that we can build upon.

Design:
https://docs.google.com/document/d/13CwgSL4yawxuYg3iNlM-4ZPCB8RgJya6b8H_E2F-Aek/edit#

Bug: v8:7700
Change-Id: I5ae074ae099126c2c0d50864ac9b3d6fa5c9e85a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3483664
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79247}

72 changed files with 7668 additions and 191 deletions

@ -198,6 +198,10 @@ declare_args() {
# Sets -dV8_EXTERNAL_CODE_SPACE
v8_enable_external_code_space = ""
# Enable the Maglev compiler.
# Sets -dV8_ENABLE_MAGLEV
v8_enable_maglev = ""
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@ -425,6 +429,9 @@ if (v8_enable_external_code_space == "") {
(target_os != "android" && target_os != "fuchsia" &&
v8_current_cpu == "arm64"))
}
if (v8_enable_maglev == "") {
v8_enable_maglev = v8_current_cpu == "x64" && v8_enable_pointer_compression
}
if (v8_enable_single_generation == "") {
v8_enable_single_generation = v8_disable_write_barriers
}
@ -956,6 +963,9 @@ config("features") {
if (v8_enable_external_code_space) {
defines += [ "V8_EXTERNAL_CODE_SPACE" ]
}
if (v8_enable_maglev) {
defines += [ "V8_ENABLE_MAGLEV" ]
}
if (v8_enable_swiss_name_dictionary) {
defines += [ "V8_ENABLE_SWISS_NAME_DICTIONARY" ]
}
@ -3452,6 +3462,28 @@ v8_header_set("v8_internal_headers") {
sources -= [ "//base/trace_event/common/trace_event_common.h" ]
}
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-basic-block.h",
"src/maglev/maglev-code-gen-state.h",
"src/maglev/maglev-code-generator.h",
"src/maglev/maglev-compilation-data.h",
"src/maglev/maglev-compiler.h",
"src/maglev/maglev-graph-builder.h",
"src/maglev/maglev-graph-labeller.h",
"src/maglev/maglev-graph-printer.h",
"src/maglev/maglev-graph-processor.h",
"src/maglev/maglev-graph.h",
"src/maglev/maglev-interpreter-frame-state.h",
"src/maglev/maglev-ir.h",
"src/maglev/maglev-regalloc-data.h",
"src/maglev/maglev-regalloc.h",
"src/maglev/maglev-register-frame-array.h",
"src/maglev/maglev-vreg-allocator.h",
"src/maglev/maglev.h",
]
}
if (v8_enable_webassembly) {
sources += [
"src/asmjs/asm-js.h",
@ -4453,6 +4485,19 @@ v8_source_set("v8_base_without_compiler") {
"src/zone/zone.cc",
]
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-code-generator.cc",
"src/maglev/maglev-compilation-data.cc",
"src/maglev/maglev-compiler.cc",
"src/maglev/maglev-graph-builder.cc",
"src/maglev/maglev-graph-printer.cc",
"src/maglev/maglev-ir.cc",
"src/maglev/maglev-regalloc.cc",
"src/maglev/maglev.cc",
]
}
if (v8_enable_webassembly) {
sources += [ ### gcmole(all) ###
"src/asmjs/asm-js.cc",


@ -51,6 +51,8 @@ include_rules = [
"+src/interpreter/interpreter.h",
"+src/interpreter/interpreter-generator.h",
"+src/interpreter/setup-interpreter.h",
"-src/maglev",
"+src/maglev/maglev.h",
"-src/regexp",
"+src/regexp/regexp.h",
"+src/regexp/regexp-flags.h",


@ -160,6 +160,15 @@ class ThreadedListBase final : public BaseClass {
return *this;
}
bool is_null() { return entry_ == nullptr; }
void InsertBefore(T* value) {
T* old_entry_value = *entry_;
*entry_ = value;
entry_ = TLTraits::next(value);
*entry_ = old_entry_value;
}
Iterator() : entry_(nullptr) {}
private:
@ -178,6 +187,10 @@ class ThreadedListBase final : public BaseClass {
using reference = const value_type;
using pointer = const value_type*;
// Allow implicit conversion to const iterator.
// NOLINTNEXTLINE
ConstIterator(Iterator& iterator) : entry_(iterator.entry_) {}
public:
ConstIterator& operator++() {
entry_ = TLTraits::next(*entry_);

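The new Iterator::InsertBefore splices a value in front of the iterator's current element by rewriting the incoming next-slot and then pointing the new value at the displaced element. A standalone sketch of the same pointer rewiring (illustrative only; this is not V8's ThreadedList, whose traits plumbing is elided here):

#include <cassert>

struct Item {
  int value;
  Item* next = nullptr;
};

// Like ThreadedListBase::Iterator, the iterator holds a pointer to the
// previous element's next-slot (or to the list head).
struct Iter {
  Item** entry;
  void InsertBefore(Item* value) {
    Item* old_entry_value = *entry;  // element currently at this position
    *entry = value;                  // predecessor (or head) now points at value
    entry = &value->next;            // iterator moves to value's next-slot...
    *entry = old_entry_value;        // ...which is wired to the displaced element
  }
};

int main() {
  Item c{3}, b{2, &c}, a{1, &b};
  Item* head = &a;
  Iter it{&a.next};      // positioned at b
  Item x{9};
  it.InsertBefore(&x);   // list is now a -> x -> b -> c; *it.entry is still b
  assert(head == &a && a.next == &x && x.next == &b && b.next == &c);
  return 0;
}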

@ -952,6 +952,13 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
TailCallRuntimeIfMarkerEquals(
masm, optimization_marker,
OptimizationMarker::kCompileMaglev_NotConcurrent,
Runtime::kCompileMaglev_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileMaglev_Concurrent,
Runtime::kCompileMaglev_Concurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimization_marker,
OptimizationMarker::kCompileTurbofan_NotConcurrent,


@ -64,6 +64,10 @@
#include "src/web-snapshot/web-snapshot.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
#ifdef V8_ENABLE_MAGLEV
#include "src/maglev/maglev.h"
#endif // V8_ENABLE_MAGLEV
namespace v8 {
namespace internal {
@ -192,29 +196,13 @@ class CompilerTracer : public AllStatic {
}
};
} // namespace
// Helper that times a scoped region and records the elapsed time.
struct ScopedTimer {
explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
DCHECK_NOT_NULL(location_);
timer_.Start();
}
~ScopedTimer() { *location_ += timer_.Elapsed(); }
base::ElapsedTimer timer_;
base::TimeDelta* location_;
};
// static
void Compiler::LogFunctionCompilation(Isolate* isolate,
CodeEventListener::LogEventsAndTags tag,
Handle<Script> script,
Handle<SharedFunctionInfo> shared,
Handle<FeedbackVector> vector,
Handle<AbstractCode> abstract_code,
CodeKind kind, double time_taken_ms) {
void LogFunctionCompilation(Isolate* isolate,
CodeEventListener::LogEventsAndTags tag,
Handle<Script> script,
Handle<SharedFunctionInfo> shared,
Handle<FeedbackVector> vector,
Handle<AbstractCode> abstract_code, CodeKind kind,
double time_taken_ms) {
DCHECK(!abstract_code.is_null());
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
DCHECK_NE(*abstract_code, FromCodeT(*BUILTIN_CODE(isolate, CompileLazy)));
@ -282,6 +270,21 @@ void Compiler::LogFunctionCompilation(Isolate* isolate,
*debug_name));
}
} // namespace
// Helper that times a scoped region and records the elapsed time.
struct ScopedTimer {
explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
DCHECK_NOT_NULL(location_);
timer_.Start();
}
~ScopedTimer() { *location_ += timer_.Elapsed(); }
base::ElapsedTimer timer_;
base::TimeDelta* location_;
};
namespace {
ScriptOriginOptions OriginOptionsForEval(Object script) {
@ -369,9 +372,9 @@ void RecordUnoptimizedFunctionCompilation(
time_taken_to_finalize.InMillisecondsF();
Handle<Script> script(Script::cast(shared->script()), isolate);
Compiler::LogFunctionCompilation(
isolate, tag, script, shared, Handle<FeedbackVector>(), abstract_code,
CodeKind::INTERPRETED_FUNCTION, time_taken_ms);
LogFunctionCompilation(isolate, tag, script, shared, Handle<FeedbackVector>(),
abstract_code, CodeKind::INTERPRETED_FUNCTION,
time_taken_ms);
}
} // namespace
@ -507,7 +510,7 @@ void OptimizedCompilationJob::RecordFunctionCompilation(
Script::cast(compilation_info()->shared_info()->script()), isolate);
Handle<FeedbackVector> feedback_vector(
compilation_info()->closure()->feedback_vector(), isolate);
Compiler::LogFunctionCompilation(
LogFunctionCompilation(
isolate, tag, script, compilation_info()->shared_info(), feedback_vector,
abstract_code, compilation_info()->code_kind(), time_taken_ms);
}
@ -1021,6 +1024,85 @@ enum class GetOptimizedCodeResultHandling {
kDiscardForTesting,
};
bool ShouldOptimize(CodeKind code_kind, Handle<SharedFunctionInfo> shared) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
switch (code_kind) {
case CodeKind::TURBOFAN:
return FLAG_opt && shared->PassesFilter(FLAG_turbo_filter);
case CodeKind::MAGLEV:
// TODO(v8:7700): FLAG_maglev_filter.
return FLAG_maglev;
default:
UNREACHABLE();
}
}
MaybeHandle<CodeT> CompileTurbofan(
Isolate* isolate, Handle<JSFunction> function,
Handle<SharedFunctionInfo> shared, ConcurrencyMode mode,
BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
GetOptimizedCodeResultHandling result_handling) {
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
static constexpr CodeKind kCodeKind = CodeKind::TURBOFAN;
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
bool has_script = shared->script().IsScript();
// BUG(5946): This DCHECK is necessary to make certain that we won't
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
std::unique_ptr<OptimizedCompilationJob> job(
compiler::Pipeline::NewCompilationJob(isolate, function, kCodeKind,
has_script, osr_offset, osr_frame));
OptimizedCompilationInfo* compilation_info = job->compilation_info();
if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
compilation_info->set_discard_result_for_testing();
}
// Prepare the job and launch concurrent compilation, or compile now.
if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
kCodeKind, function)) {
return ContinuationForConcurrentOptimization(isolate, function);
}
} else {
DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
return ToCodeT(compilation_info->code(), isolate);
}
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return {};
}
MaybeHandle<CodeT> CompileMaglev(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
GetOptimizedCodeResultHandling result_handling) {
// TODO(v8:7700): Add missing support.
CHECK(mode == ConcurrencyMode::kNotConcurrent);
CHECK(osr_offset.IsNone());
CHECK(osr_frame == nullptr);
CHECK(result_handling == GetOptimizedCodeResultHandling::kDefault);
// TODO(v8:7700): Tracing, see CompileTurbofan.
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
#ifdef V8_ENABLE_MAGLEV
return Maglev::Compile(isolate, function);
#else
return {};
#endif
}
MaybeHandle<CodeT> GetOptimizedCode(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
@ -1035,6 +1117,7 @@ MaybeHandle<CodeT> GetOptimizedCode(
// don't try to re-optimize.
if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
// TODO(v8:7700): Distinguish between Maglev and Turbofan.
if (shared->optimization_disabled() &&
shared->disabled_optimization_reason() == BailoutReason::kNeverOptimize) {
return {};
@ -1043,12 +1126,12 @@ MaybeHandle<CodeT> GetOptimizedCode(
// Do not optimize when debugger needs to hook into every call.
if (isolate->debug()->needs_check_on_function_call()) return {};
// Do not use TurboFan if we need to be able to set break points.
// Do not optimize if we need to be able to set break points.
if (shared->HasBreakInfo()) return {};
// Do not use TurboFan if optimization is disabled or function doesn't pass
// Do not optimize if optimization is disabled or function doesn't pass
// turbo_filter.
if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) return {};
if (!ShouldOptimize(code_kind, shared)) return {};
// If code was pending optimization for testing, remove the entry from the
// table that was preventing the bytecode from being flushed.
@ -1067,45 +1150,19 @@ MaybeHandle<CodeT> GetOptimizedCode(
}
}
// Reset profiler ticks, function is no longer considered hot.
// Reset profiler ticks, the function is no longer considered hot.
// TODO(v8:7700): Update for Maglev tiering.
DCHECK(shared->is_compiled());
function->feedback_vector().set_profiler_ticks(0);
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
bool has_script = shared->script().IsScript();
// BUG(5946): This DCHECK is necessary to make certain that we won't
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
std::unique_ptr<OptimizedCompilationJob> job(
compiler::Pipeline::NewCompilationJob(isolate, function, code_kind,
has_script, osr_offset, osr_frame));
OptimizedCompilationInfo* compilation_info = job->compilation_info();
if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
compilation_info->set_discard_result_for_testing();
}
// Prepare the job and launch concurrent compilation, or compile now.
if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
code_kind, function)) {
return ContinuationForConcurrentOptimization(isolate, function);
}
if (code_kind == CodeKind::TURBOFAN) {
return CompileTurbofan(isolate, function, shared, mode, osr_offset,
osr_frame, result_handling);
} else {
DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
return ToCodeT(compilation_info->code(), isolate);
}
DCHECK_EQ(code_kind, CodeKind::MAGLEV);
return CompileMaglev(isolate, function, mode, osr_offset, osr_frame,
result_handling);
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return {};
}
// When --stress-concurrent-inlining is enabled, spawn concurrent jobs in
@ -1116,6 +1173,9 @@ void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
Handle<JSFunction> function,
ConcurrencyMode mode,
CodeKind code_kind) {
// TODO(v8:7700): Support Maglev.
if (code_kind == CodeKind::MAGLEV) return;
DCHECK(FLAG_stress_concurrent_inlining &&
isolate->concurrent_recompilation_enabled() &&
mode == ConcurrencyMode::kNotConcurrent &&
@ -2024,11 +2084,11 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
CompilerTracer::TraceFinishBaselineCompile(isolate, shared, time_taken_ms);
if (shared->script().IsScript()) {
Compiler::LogFunctionCompilation(
isolate, CodeEventListener::FUNCTION_TAG,
handle(Script::cast(shared->script()), isolate), shared,
Handle<FeedbackVector>(), Handle<AbstractCode>::cast(code),
CodeKind::BASELINE, time_taken_ms);
LogFunctionCompilation(isolate, CodeEventListener::FUNCTION_TAG,
handle(Script::cast(shared->script()), isolate),
shared, Handle<FeedbackVector>(),
Handle<AbstractCode>::cast(code), CodeKind::BASELINE,
time_taken_ms);
}
return true;
}
@ -2052,6 +2112,32 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
return true;
}
// static
bool Compiler::CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode,
IsCompiledScope* is_compiled_scope) {
#ifdef V8_ENABLE_MAGLEV
// Bytecode must be available for maglev compilation.
DCHECK(is_compiled_scope->is_compiled());
// TODO(v8:7700): Support concurrent compilation.
DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
// Maglev code needs a feedback vector.
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
MaybeHandle<CodeT> maybe_code = Maglev::Compile(isolate, function);
Handle<CodeT> code;
if (!maybe_code.ToHandle(&code)) return false;
DCHECK_EQ(code->kind(), CodeKind::MAGLEV);
function->set_code(*code);
return true;
#else
return false;
#endif // V8_ENABLE_MAGLEV
}
// static
MaybeHandle<SharedFunctionInfo> Compiler::CompileToplevel(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,


@ -77,6 +77,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool Compile(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
static MaybeHandle<SharedFunctionInfo> CompileToplevel(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
IsCompiledScope* is_compiled_scope);
static bool CompileSharedWithBaseline(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag,
@ -84,29 +88,24 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
static bool CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode,
IsCompiledScope* is_compiled_scope);
static void CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind);
static MaybeHandle<SharedFunctionInfo> CompileToplevel(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
IsCompiledScope* is_compiled_scope);
static void LogFunctionCompilation(Isolate* isolate,
CodeEventListener::LogEventsAndTags tag,
Handle<Script> script,
Handle<SharedFunctionInfo> shared,
Handle<FeedbackVector> feedback_vector,
Handle<AbstractCode> abstract_code,
CodeKind kind, double time_taken_ms);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
Isolate* isolate);
// Collect source positions for a function that has already been compiled to
// bytecode, but for which source positions were not collected (e.g. because
// they were not immediately needed).
static bool CollectSourcePositions(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
Isolate* isolate);
// Finalize and install code from previously run background compile task.
static bool FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
Isolate* isolate,

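Compiler::CompileMaglev (declared above next to CompileBaseline) is the new synchronous entry point into the Maglev pipeline. A hedged sketch of how a tier-up site might call it (hypothetical call site, assuming the usual IsCompiledScope(shared, isolate) constructor; the real callers are not part of this excerpt):

// Hypothetical call site, illustration only.
bool TryCompileWithMaglev(Isolate* isolate, Handle<JSFunction> function) {
  if (!FLAG_maglev) return false;
  IsCompiledScope is_compiled_scope(function->shared(), isolate);
  if (!is_compiled_scope.is_compiled()) return false;  // bytecode is required
  return Compiler::CompileMaglev(isolate, function,
                                 ConcurrencyMode::kNotConcurrent,
                                 &is_compiled_scope);
}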

@ -99,6 +99,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::WASM_TO_JS_FUNCTION:
break;
case CodeKind::BASELINE:
case CodeKind::MAGLEV:
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::REGEXP:
UNREACHABLE();


@ -1651,10 +1651,12 @@ using FileAndLine = std::pair<const char*, int>;
enum class OptimizationMarker : int32_t {
// These values are set so that it is easy to check if there is a marker where
// some processing needs to be done.
kNone = 0b00,
kInOptimizationQueue = 0b01,
kCompileTurbofan_NotConcurrent = 0b10,
kCompileTurbofan_Concurrent = 0b11,
kNone = 0b000,
kInOptimizationQueue = 0b001,
kCompileMaglev_NotConcurrent = 0b010,
kCompileMaglev_Concurrent = 0b011,
kCompileTurbofan_NotConcurrent = 0b100,
kCompileTurbofan_Concurrent = 0b101,
kLastOptimizationMarker = kCompileTurbofan_Concurrent,
};
// For kNone or kInOptimizationQueue we don't need any special processing.
@ -1664,18 +1666,18 @@ STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b00 &&
static_cast<int>(OptimizationMarker::kInOptimizationQueue) ==
0b01);
STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <=
0b11);
static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b10;
inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
return marker == OptimizationMarker::kInOptimizationQueue;
}
0b111);
static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
switch (marker) {
case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone";
case OptimizationMarker::kCompileMaglev_NotConcurrent:
return os << "OptimizationMarker::kCompileMaglev_NotConcurrent";
case OptimizationMarker::kCompileMaglev_Concurrent:
return os << "OptimizationMarker::kCompileMaglev_Concurrent";
case OptimizationMarker::kCompileTurbofan_NotConcurrent:
return os << "OptimizationMarker::kCompileTurbofan_NotConcurrent";
case OptimizationMarker::kCompileTurbofan_Concurrent:
@ -1696,14 +1698,24 @@ inline std::ostream& operator<<(std::ostream& os,
case SpeculationMode::kDisallowSpeculation:
return os << "SpeculationMode::kDisallowSpeculation";
}
UNREACHABLE();
return os;
}
enum class BlockingBehavior { kBlock, kDontBlock };
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
inline const char* ToString(ConcurrencyMode mode) {
switch (mode) {
case ConcurrencyMode::kNotConcurrent:
return "ConcurrencyMode::kNotConcurrent";
case ConcurrencyMode::kConcurrent:
return "ConcurrencyMode::kConcurrent";
}
}
inline std::ostream& operator<<(std::ostream& os, ConcurrencyMode mode) {
return os << ToString(mode);
}
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
C(Handler, handler) \
C(CEntryFP, c_entry_fp) \

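With the two Maglev markers added above, the marker now needs three bits, and the fast "nothing to do" check keys off the widened 0b110 mask. A minimal illustration using only the values defined above (the helper name is hypothetical; the actual check lives in the tiering builtins and runtime, not shown here):

// kNone (0b000) and kInOptimizationQueue (0b001) need no special processing;
// all four kCompile* markers (0b010..0b101) have a bit set in 0b110.
inline bool NoCompileMarkerIsPending(OptimizationMarker marker) {
  return (static_cast<uint32_t>(marker) & kNoneOrInOptimizationQueueMask) == 0;
}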

@ -140,6 +140,9 @@ class V8_EXPORT_PRIVATE INSTRUCTION_OPERAND_ALIGN InstructionOperand {
// APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
bool operator==(InstructionOperand& other) const { return Equals(other); }
bool operator!=(InstructionOperand& other) const { return !Equals(other); }
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}


@ -773,6 +773,12 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
return header_to_info_.find(header_offset)->second;
}
const LoopInfo* BytecodeAnalysis::TryGetLoopInfoFor(int header_offset) const {
auto it = header_to_info_.find(header_offset);
if (it == header_to_info_.end()) return nullptr;
return &it->second;
}
const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
int offset) const {
if (!analyze_liveness_) return nullptr;


@ -110,6 +110,11 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
int GetLoopOffsetFor(int offset) const;
// Get the loop info of the loop header at {header_offset}.
const LoopInfo& GetLoopInfoFor(int header_offset) const;
// Try to get the loop info of the loop header at {header_offset}, returning
// null if there isn't any.
const LoopInfo* TryGetLoopInfoFor(int header_offset) const;
const ZoneMap<int, LoopInfo>& GetLoopInfos() const { return header_to_info_; }
// Get the top-level resume jump targets.
const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {

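TryGetLoopInfoFor is the nullable counterpart of GetLoopInfoFor, for callers that do not know whether the given offset is actually a loop header. A small usage sketch (hypothetical caller, illustration only):

// Hypothetical caller, illustration only.
void VisitMaybeLoopHeader(const compiler::BytecodeAnalysis& analysis,
                          int offset) {
  if (const compiler::LoopInfo* loop_info = analysis.TryGetLoopInfoFor(offset)) {
    (void)loop_info;  // {offset} starts a loop; inspect *loop_info here.
  } else {
    // Not a loop header; GetLoopInfoFor(offset) would not be valid here.
  }
}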

@ -19,6 +19,38 @@ namespace compiler {
class BytecodeLivenessState : public ZoneObject {
public:
class Iterator {
public:
int operator*() const {
// Subtract one to compensate for the accumulator at the start of the
// bit vector.
return *it_ - 1;
}
void operator++() { return ++it_; }
bool operator!=(const Iterator& other) const { return it_ != other.it_; }
private:
static constexpr struct StartTag {
} kStartTag = {};
static constexpr struct EndTag {
} kEndTag = {};
explicit Iterator(const BytecodeLivenessState& liveness, StartTag)
: it_(liveness.bit_vector_.begin()) {
// If we're not at the end, and the current value is the accumulator, skip
// over it.
if (it_ != liveness.bit_vector_.end() && *it_ == 0) {
++it_;
}
}
explicit Iterator(const BytecodeLivenessState& liveness, EndTag)
: it_(liveness.bit_vector_.end()) {}
BitVector::Iterator it_;
friend class BytecodeLivenessState;
};
BytecodeLivenessState(int register_count, Zone* zone)
: bit_vector_(register_count + 1, zone) {}
BytecodeLivenessState(const BytecodeLivenessState&) = delete;
@ -71,6 +103,13 @@ class BytecodeLivenessState : public ZoneObject {
int register_count() const { return bit_vector_.length() - 1; }
// Number of live values, including the accumulator.
int live_value_count() const { return bit_vector_.Count(); }
Iterator begin() const { return Iterator(*this, Iterator::kStartTag); }
Iterator end() const { return Iterator(*this, Iterator::kEndTag); }
private:
BitVector bit_vector_;
};

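The new iterator makes BytecodeLivenessState range-iterable over live register indices, hiding the accumulator bit stored at position 0 of the bit vector (hence the -1 in operator*). A usage sketch (hypothetical caller; assumes the class's existing AccumulatorIsLive() accessor):

#include <cstdio>

// Hypothetical caller, illustration only.
void PrintLiveness(const compiler::BytecodeLivenessState& liveness) {
  for (int reg_index : liveness) {
    std::printf("r%d is live\n", reg_index);  // the accumulator is never yielded
  }
  if (liveness.AccumulatorIsLive()) std::printf("accumulator is live\n");
}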

@ -673,6 +673,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
return BUILTIN;
case CodeKind::TURBOFAN:
case CodeKind::MAGLEV:
return OPTIMIZED;
case CodeKind::BASELINE:
return Type::BASELINE;


@ -513,6 +513,20 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
#ifdef V8_ENABLE_MAGLEV
#define V8_ENABLE_MAGLEV_BOOL true
#else
#define V8_ENABLE_MAGLEV_BOOL false
#endif // V8_ENABLE_MAGLEV
DEFINE_BOOL(maglev, V8_ENABLE_MAGLEV_BOOL,
"enable the maglev optimizing compiler")
DEFINE_STRING(maglev_filter, "*", "optimization filter for the maglev compiler")
DEFINE_BOOL(maglev_break_on_entry, false, "insert an int3 on maglev entries")
DEFINE_BOOL(print_maglev_graph, false, "print maglev graph")
DEFINE_BOOL(print_maglev_code, false, "print maglev code")
DEFINE_BOOL(trace_maglev_regalloc, false, "trace maglev register allocation")
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
@ -543,6 +557,8 @@ DEFINE_IMPLICATION(jitless, regexp_interpret_all)
// No Sparkplug compilation.
DEFINE_NEG_IMPLICATION(jitless, sparkplug)
DEFINE_NEG_IMPLICATION(jitless, always_sparkplug)
// No Maglev compilation.
DEFINE_NEG_IMPLICATION(jitless, maglev)
#endif
#ifndef V8_TARGET_ARCH_ARM


@ -53,14 +53,6 @@ void BytecodeArrayIterator::ApplyDebugBreak() {
*cursor = interpreter::Bytecodes::ToByte(debugbreak);
}
int BytecodeArrayIterator::current_bytecode_size() const {
return prefix_size_ + current_bytecode_size_without_prefix();
}
int BytecodeArrayIterator::current_bytecode_size_without_prefix() const {
return Bytecodes::Size(current_bytecode(), current_operand_scale());
}
uint32_t BytecodeArrayIterator::GetUnsignedOperand(
int operand_index, OperandType operand_type) const {
DCHECK_GE(operand_index, 0);


@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
BytecodeArrayIterator& operator=(const BytecodeArrayIterator&) = delete;
inline void Advance() {
cursor_ += Bytecodes::Size(current_bytecode(), current_operand_scale());
cursor_ += current_bytecode_size_without_prefix();
UpdateOperandScale();
}
void SetOffset(int offset);
@ -92,11 +92,16 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
return current_bytecode;
}
int current_bytecode_size() const;
int current_bytecode_size_without_prefix() const;
int current_bytecode_size() const {
return prefix_size_ + current_bytecode_size_without_prefix();
}
int current_bytecode_size_without_prefix() const {
return Bytecodes::Size(current_bytecode(), current_operand_scale());
}
int current_offset() const {
return static_cast<int>(cursor_ - start_ - prefix_size_);
}
int next_offset() const { return current_offset() + current_bytecode_size(); }
OperandScale current_operand_scale() const { return operand_scale_; }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }


@ -2152,6 +2152,8 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::TURBOFAN:
case CodeKind::BASELINE:
case CodeKind::MAGLEV:
return; // We log this later using LogCompiledFunctions.
case CodeKind::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
case CodeKind::FOR_TESTING:

src/maglev/DEPS Normal file

@ -0,0 +1,6 @@
include_rules = [
# Allow Maglev to depend on TurboFan data structures.
# TODO(v8:7700): Clean up these dependencies by extracting common code to a
# separate directory.
"+src/compiler",
]

src/maglev/OWNERS Normal file

@ -0,0 +1,3 @@
leszeks@chromium.org
jgruber@chromium.org
verwaest@chromium.org


@ -0,0 +1,107 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
#define V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
#include <vector>
#include "src/codegen/label.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace maglev {
using NodeIterator = Node::List::Iterator;
using NodeConstIterator = Node::List::Iterator;
class BasicBlock {
public:
explicit BasicBlock(MergePointInterpreterFrameState* state)
: control_node_(nullptr), state_(state) {}
uint32_t first_id() const {
if (has_phi()) return phis()->first()->id();
return nodes_.is_empty() ? control_node()->id() : nodes_.first()->id();
}
uint32_t FirstNonGapMoveId() const {
if (has_phi()) return phis()->first()->id();
if (!nodes_.is_empty()) {
for (const Node* node : nodes_) {
if (node->Is<GapMove>()) continue;
return node->id();
}
}
return control_node()->id();
}
Node::List& nodes() { return nodes_; }
ControlNode* control_node() const { return control_node_; }
void set_control_node(ControlNode* control_node) {
DCHECK_NULL(control_node_);
control_node_ = control_node;
}
bool has_phi() const { return has_state() && state_->has_phi(); }
bool is_empty_block() const { return is_empty_block_; }
BasicBlock* empty_block_predecessor() const {
DCHECK(is_empty_block());
return empty_block_predecessor_;
}
void set_empty_block_predecessor(BasicBlock* predecessor) {
DCHECK(nodes_.is_empty());
DCHECK(control_node()->Is<Jump>());
DCHECK_NULL(state_);
is_empty_block_ = true;
empty_block_predecessor_ = predecessor;
}
Phi::List* phis() const {
DCHECK(has_phi());
return state_->phis();
}
BasicBlock* predecessor_at(int i) const {
DCHECK_NOT_NULL(state_);
return state_->predecessor_at(i);
}
int predecessor_id() const {
return control_node()->Cast<UnconditionalControlNode>()->predecessor_id();
}
void set_predecessor_id(int id) {
control_node()->Cast<UnconditionalControlNode>()->set_predecessor_id(id);
}
Label* label() { return &label_; }
MergePointInterpreterFrameState* state() const {
DCHECK(has_state());
return state_;
}
bool has_state() const { return state_ != nullptr && !is_empty_block(); }
private:
bool is_empty_block_ = false;
Node::List nodes_;
ControlNode* control_node_;
union {
MergePointInterpreterFrameState* state_;
BasicBlock* empty_block_predecessor_;
};
Label label_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_


@ -0,0 +1,122 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
#define V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
#include "src/codegen/assembler.h"
#include "src/codegen/label.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
class MaglevCodeGenState {
public:
class DeferredCodeInfo {
public:
virtual void Generate(MaglevCodeGenState* code_gen_state,
Label* return_label) = 0;
Label deferred_code_label;
Label return_label;
};
MaglevCodeGenState(MaglevCompilationUnit* compilation_unit,
SafepointTableBuilder* safepoint_table_builder)
: compilation_unit_(compilation_unit),
safepoint_table_builder_(safepoint_table_builder),
masm_(isolate(), CodeObjectRequired::kNo) {}
void SetVregSlots(int slots) { vreg_slots_ = slots; }
void PushDeferredCode(DeferredCodeInfo* deferred_code) {
deferred_code_.push_back(deferred_code);
}
void EmitDeferredCode() {
for (auto& deferred_code : deferred_code_) {
masm()->RecordComment("-- Deferred block");
masm()->bind(&deferred_code->deferred_code_label);
deferred_code->Generate(this, &deferred_code->return_label);
masm()->int3();
}
}
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
}
Isolate* isolate() const { return compilation_unit_->isolate(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis;
}
compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode;
}
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
}
MacroAssembler* masm() { return &masm_; }
int vreg_slots() const { return vreg_slots_; }
SafepointTableBuilder* safepoint_table_builder() const {
return safepoint_table_builder_;
}
MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
private:
MaglevCompilationUnit* const compilation_unit_;
SafepointTableBuilder* const safepoint_table_builder_;
MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_;
int vreg_slots_ = 0;
};
// Some helpers for codegen.
// TODO(leszeks): consider moving this to a separate header.
inline MemOperand GetStackSlot(int index) {
return MemOperand(rbp, StandardFrameConstants::kExpressionsOffset -
index * kSystemPointerSize);
}
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
return GetStackSlot(operand.index());
}
inline Register ToRegister(const compiler::InstructionOperand& operand) {
return compiler::AllocatedOperand::cast(operand).GetRegister();
}
inline Register ToRegister(const ValueLocation& location) {
return ToRegister(location.operand());
}
inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}
inline MemOperand ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
inline int GetSafepointIndexForStackSlot(int i) {
// Safepoint tables also contain slots for all fixed frame slots (both
// above and below the fp).
return StandardFrameConstants::kFixedSlotCount + i;
}
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_

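DeferredCodeInfo together with PushDeferredCode/EmitDeferredCode lets node code emit out-of-line slow paths after the main body; EmitDeferredCode places an int3() after each block to trap a missing jump back. A sketch of the intended shape of such a block (illustrative only; concrete users are not part of this excerpt):

// Illustrative only: a deferred block that emits a slow path and rejoins.
class DeferredExampleSlowPath : public MaglevCodeGenState::DeferredCodeInfo {
 public:
  void Generate(MaglevCodeGenState* code_gen_state,
                Label* return_label) override {
    MacroAssembler* masm = code_gen_state->masm();
    // ... slow-path code would be emitted here ...
    masm->jmp(return_label);  // rejoin the fast path
  }
};
// Fast path (sketch):
//   code_gen_state->PushDeferredCode(deferred);
//   __ j(not_equal, &deferred->deferred_code_label);
//   __ bind(&deferred->return_label);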

@ -0,0 +1,256 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-code-generator.h"
#include "src/codegen/code-desc.h"
#include "src/codegen/safepoint-table.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
#define __ masm()->
namespace {
class MaglevCodeGeneratingNodeProcessor {
public:
static constexpr bool kNeedsCheckpointStates = true;
explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {
if (FLAG_maglev_break_on_entry) {
__ int3();
}
__ EnterFrame(StackFrame::BASELINE);
// Save arguments in frame.
// TODO(leszeks): Consider eliding this frame if we don't make any calls
// that could clobber these registers.
__ Push(kContextRegister);
__ Push(kJSFunctionRegister); // Callee's JS function.
__ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
// Extend rsp by the size of the frame.
code_gen_state_->SetVregSlots(graph->stack_slots());
__ subq(rsp, Immediate(code_gen_state_->vreg_slots() * kSystemPointerSize));
// Initialize stack slots.
// TODO(jgruber): Update logic once the register allocator is further along.
{
ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
__ Move(rax, Immediate(0));
__ Move(rcx, Immediate(code_gen_state_->vreg_slots()));
__ leaq(rdi, GetStackSlot(code_gen_state_->vreg_slots() - 1));
__ repstosq();
}
// We don't emit proper safepoint data yet; instead, define a single
// safepoint at the end of the code object, with all-tagged stack slots.
// TODO(jgruber): Real safepoint handling.
SafepointTableBuilder::Safepoint safepoint =
safepoint_table_builder()->DefineSafepoint(masm());
for (int i = 0; i < code_gen_state_->vreg_slots(); i++) {
safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(i));
}
}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
code_gen_state_->EmitDeferredCode();
}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- Block b" << graph_labeller()->BlockId(block);
__ RecordComment(ss.str());
}
__ bind(block->label());
}
template <typename NodeT>
void Process(NodeT* node, const ProcessingState& state) {
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- " << graph_labeller()->NodeId(node) << ": "
<< PrintNode(graph_labeller(), node);
__ RecordComment(ss.str());
}
// Emit Phi moves before visiting the control node.
if (std::is_base_of<UnconditionalControlNode, NodeT>::value) {
BasicBlock* target =
node->template Cast<UnconditionalControlNode>()->target();
if (target->has_state()) {
int predecessor_id = state.block()->predecessor_id();
__ RecordComment("-- Register merge gap moves:");
for (int index = 0; index < kAllocatableGeneralRegisterCount; ++index) {
RegisterMerge* merge;
if (LoadMergeState(target->state()->register_state()[index],
&merge)) {
compiler::AllocatedOperand source = merge->operand(predecessor_id);
Register reg = MapIndexToRegister(index);
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << reg;
__ RecordComment(ss.str());
}
// TODO(leszeks): Implement parallel moves.
if (source.IsStackSlot()) {
__ movq(reg, GetStackSlot(source));
} else {
__ movq(reg, ToRegister(source));
}
}
}
if (target->has_phi()) {
__ RecordComment("-- Phi gap moves:");
Phi::List* phis = target->phis();
for (Phi* phi : *phis) {
compiler::AllocatedOperand source =
compiler::AllocatedOperand::cast(
phi->input(state.block()->predecessor_id()).operand());
compiler::AllocatedOperand target =
compiler::AllocatedOperand::cast(phi->result().operand());
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << target << " (n"
<< graph_labeller()->NodeId(phi) << ")";
__ RecordComment(ss.str());
}
if (source.IsRegister()) {
Register source_reg = ToRegister(source);
if (target.IsRegister()) {
__ movq(ToRegister(target), source_reg);
} else {
__ movq(GetStackSlot(target), source_reg);
}
} else {
if (target.IsRegister()) {
__ movq(ToRegister(target), GetStackSlot(source));
} else {
__ movq(kScratchRegister, GetStackSlot(source));
__ movq(GetStackSlot(target), kScratchRegister);
}
}
}
}
} else {
__ RecordComment("-- Target has no state, must be a fallthrough");
}
}
node->GenerateCode(code_gen_state_, state);
if (std::is_base_of<ValueNode, NodeT>::value) {
ValueNode* value_node = node->template Cast<ValueNode>();
if (value_node->is_spilled()) {
if (FLAG_code_comments) __ RecordComment("-- Spill:");
compiler::AllocatedOperand source =
compiler::AllocatedOperand::cast(value_node->result().operand());
// We shouldn't spill nodes which already output to the stack.
DCHECK(!source.IsStackSlot());
__ movq(GetStackSlot(value_node->spill_slot()), ToRegister(source));
}
}
}
Isolate* isolate() const { return code_gen_state_->isolate(); }
MacroAssembler* masm() const { return code_gen_state_->masm(); }
MaglevGraphLabeller* graph_labeller() const {
return code_gen_state_->graph_labeller();
}
SafepointTableBuilder* safepoint_table_builder() const {
return code_gen_state_->safepoint_table_builder();
}
private:
MaglevCodeGenState* code_gen_state_;
};
} // namespace
class MaglevCodeGeneratorImpl final {
public:
static Handle<Code> Generate(MaglevCompilationUnit* compilation_unit,
Graph* graph) {
return MaglevCodeGeneratorImpl(compilation_unit, graph).Generate();
}
private:
MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
: safepoint_table_builder_(compilation_unit->zone()),
code_gen_state_(compilation_unit, safepoint_table_builder()),
processor_(compilation_unit, &code_gen_state_),
graph_(graph) {}
Handle<Code> Generate() {
EmitCode();
EmitMetadata();
return BuildCodeObject();
}
void EmitCode() { processor_.ProcessGraph(graph_); }
void EmitMetadata() {
// Final alignment before starting on the metadata section.
masm()->Align(Code::kMetadataAlignment);
safepoint_table_builder()->Emit(masm(),
stack_slot_count_with_fixed_frame());
}
Handle<Code> BuildCodeObject() {
CodeDesc desc;
static constexpr int kNoHandlerTableOffset = 0;
masm()->GetCode(isolate(), &desc, safepoint_table_builder(),
kNoHandlerTableOffset);
return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
.set_stack_slots(stack_slot_count_with_fixed_frame())
.Build();
}
int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
int stack_slot_count_with_fixed_frame() const {
return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
}
Isolate* isolate() const {
return code_gen_state_.compilation_unit()->isolate();
}
MacroAssembler* masm() { return code_gen_state_.masm(); }
SafepointTableBuilder* safepoint_table_builder() {
return &safepoint_table_builder_;
}
SafepointTableBuilder safepoint_table_builder_;
MaglevCodeGenState code_gen_state_;
GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
Graph* const graph_;
};
// static
Handle<Code> MaglevCodeGenerator::Generate(
MaglevCompilationUnit* compilation_unit, Graph* graph) {
return MaglevCodeGeneratorImpl::Generate(compilation_unit, graph);
}
} // namespace maglev
} // namespace internal
} // namespace v8


@ -0,0 +1,27 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
#define V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
#include "src/common/globals.h"
namespace v8 {
namespace internal {
namespace maglev {
class Graph;
struct MaglevCompilationUnit;
class MaglevCodeGenerator : public AllStatic {
public:
static Handle<Code> Generate(MaglevCompilationUnit* compilation_unit,
Graph* graph);
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_


@ -0,0 +1,35 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-compilation-data.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/objects/js-function-inl.h"
namespace v8 {
namespace internal {
namespace maglev {
MaglevCompilationData::MaglevCompilationData(compiler::JSHeapBroker* broker)
: broker(broker),
isolate(broker->isolate()),
zone(broker->isolate()->allocator(), "maglev-zone") {}
MaglevCompilationData::~MaglevCompilationData() = default;
MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationData* data,
Handle<JSFunction> function)
: compilation_data(data),
bytecode(
MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
feedback(MakeRef(broker(), function->feedback_vector())),
bytecode_analysis(bytecode.object(), zone(), BytecodeOffset::None(),
true),
register_count_(bytecode.register_count()),
parameter_count_(bytecode.parameter_count()) {}
} // namespace maglev
} // namespace internal
} // namespace v8


@ -0,0 +1,53 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
#define V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
#include "src/common/globals.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/heap-refs.h"
namespace v8 {
namespace internal {
namespace maglev {
class MaglevGraphLabeller;
struct MaglevCompilationData {
explicit MaglevCompilationData(compiler::JSHeapBroker* broker);
~MaglevCompilationData();
std::unique_ptr<MaglevGraphLabeller> graph_labeller;
compiler::JSHeapBroker* const broker;
Isolate* const isolate;
Zone zone;
};
struct MaglevCompilationUnit {
MaglevCompilationUnit(MaglevCompilationData* data,
Handle<JSFunction> function);
compiler::JSHeapBroker* broker() const { return compilation_data->broker; }
Isolate* isolate() const { return compilation_data->isolate; }
Zone* zone() const { return &compilation_data->zone; }
int register_count() const { return register_count_; }
int parameter_count() const { return parameter_count_; }
bool has_graph_labeller() const { return !!compilation_data->graph_labeller; }
MaglevGraphLabeller* graph_labeller() const {
DCHECK(has_graph_labeller());
return compilation_data->graph_labeller.get();
}
MaglevCompilationData* const compilation_data;
const compiler::BytecodeArrayRef bytecode;
const compiler::FeedbackVectorRef feedback;
compiler::BytecodeAnalysis const bytecode_analysis;
int register_count_;
int parameter_count_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_


@ -0,0 +1,186 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-compiler.h"
#include <iomanip>
#include <ostream>
#include <type_traits>
#include "src/base/iterator.h"
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/reglist.h"
#include "src/codegen/x64/register-x64.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/frames.h"
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-code-generator.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph-builder.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc.h"
#include "src/maglev/maglev-vreg-allocator.h"
#include "src/objects/code-inl.h"
#include "src/objects/js-function.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace maglev {
class NumberingProcessor {
public:
static constexpr bool kNeedsCheckpointStates = false;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) { node_id_ = 1; }
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
void Process(NodeBase* node, const ProcessingState& state) {
node->set_id(node_id_++);
}
private:
uint32_t node_id_;
};
class UseMarkingProcessor {
public:
static constexpr bool kNeedsCheckpointStates = true;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
void Process(NodeBase* node, const ProcessingState& state) {
if (node->properties().can_deopt()) MarkCheckpointNodes(node, state);
for (Input& input : *node) {
input.node()->mark_use(node->id(), &input);
}
}
void Process(Phi* node, const ProcessingState& state) {
// Don't mark Phi uses when visiting the node, because of loop phis.
// Instead, they'll be visited while processing Jump/JumpLoop.
}
// Specialize the two unconditional jumps to extend their Phis' inputs' live
// ranges.
void Process(JumpLoop* node, const ProcessingState& state) {
int i = state.block()->predecessor_id();
BasicBlock* target = node->target();
if (!target->has_phi()) return;
uint32_t use = node->id();
for (Phi* phi : *target->phis()) {
ValueNode* input = phi->input(i).node();
input->mark_use(use, &phi->input(i));
}
}
void Process(Jump* node, const ProcessingState& state) {
int i = state.block()->predecessor_id();
BasicBlock* target = node->target();
if (!target->has_phi()) return;
uint32_t use = node->id();
for (Phi* phi : *target->phis()) {
ValueNode* input = phi->input(i).node();
input->mark_use(use, &phi->input(i));
}
}
private:
void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
const InterpreterFrameState* checkpoint_state =
state.checkpoint_frame_state();
int use_id = node->id();
for (int i = 0; i < state.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
ValueNode* node = checkpoint_state->get(reg);
if (node) node->mark_use(use_id, nullptr);
}
for (int i = 0; i < state.register_count(); i++) {
interpreter::Register reg = interpreter::Register(i);
ValueNode* node = checkpoint_state->get(reg);
if (node) node->mark_use(use_id, nullptr);
}
if (checkpoint_state->accumulator()) {
checkpoint_state->accumulator()->mark_use(use_id, nullptr);
}
}
};
MaglevCompiler::MaglevCompiler(compiler::JSHeapBroker* broker,
Handle<JSFunction> function)
: compilation_data_(broker),
toplevel_compilation_unit_(&compilation_data_, function) {}
Handle<Code> MaglevCompiler::Compile() {
// Build graph.
if (FLAG_print_maglev_code || FLAG_code_comments || FLAG_print_maglev_graph ||
FLAG_trace_maglev_regalloc) {
compilation_data_.graph_labeller.reset(new MaglevGraphLabeller());
}
MaglevGraphBuilder graph_builder(&toplevel_compilation_unit_);
graph_builder.Build();
if (FLAG_print_maglev_graph) {
std::cout << "After graph building" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
}
{
GraphMultiProcessor<NumberingProcessor, UseMarkingProcessor,
MaglevVregAllocator>
processor(&toplevel_compilation_unit_);
processor.ProcessGraph(graph_builder.graph());
}
if (FLAG_print_maglev_graph) {
std::cout << "After node processor" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
}
StraightForwardRegisterAllocator allocator(&toplevel_compilation_unit_,
graph_builder.graph());
if (FLAG_print_maglev_graph) {
std::cout << "After register allocation" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
}
Handle<Code> code = MaglevCodeGenerator::Generate(&toplevel_compilation_unit_,
graph_builder.graph());
const bool deps_committed_successfully =
broker()->dependencies()->Commit(code);
CHECK(deps_committed_successfully);
if (FLAG_print_maglev_code) {
code->Print();
}
return code;
}
} // namespace maglev
} // namespace internal
} // namespace v8

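MaglevCompiler drives the graph through several processors (NumberingProcessor, UseMarkingProcessor, MaglevVregAllocator, and later the code generator), all implementing the same GraphProcessor hook interface. A minimal sketch of another processor honouring that interface (illustrative only, not part of the commit):

// Illustrative only: counts graph nodes using the same hooks as
// NumberingProcessor above.
class NodeCountingProcessor {
 public:
  static constexpr bool kNeedsCheckpointStates = false;
  void PreProcessGraph(MaglevCompilationUnit*, Graph*) { count_ = 0; }
  void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}
  void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock*) {}
  void Process(NodeBase*, const ProcessingState&) { ++count_; }
  int count() const { return count_; }
 private:
  int count_ = 0;
};
// Driven the same way as in MaglevCompiler::Compile (sketch):
//   GraphProcessor<NodeCountingProcessor> processor(&compilation_unit);
//   processor.ProcessGraph(graph);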

@ -0,0 +1,42 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_COMPILER_H_
#define V8_MAGLEV_MAGLEV_COMPILER_H_
#include "src/common/globals.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/heap-refs.h"
#include "src/maglev/maglev-compilation-data.h"
namespace v8 {
namespace internal {
namespace compiler {
class JSHeapBroker;
}
namespace maglev {
class MaglevCompiler {
public:
explicit MaglevCompiler(compiler::JSHeapBroker* broker,
Handle<JSFunction> function);
Handle<Code> Compile();
compiler::JSHeapBroker* broker() const { return compilation_data_.broker; }
Zone* zone() { return &compilation_data_.zone; }
Isolate* isolate() { return compilation_data_.isolate; }
private:
MaglevCompilationData compilation_data_;
MaglevCompilationUnit toplevel_compilation_unit_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_COMPILER_H_


@ -0,0 +1,438 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-graph-builder.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
#include "src/handles/maybe-handles-inl.h"
#include "src/ic/handler-configuration.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/name-inl.h"
#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
namespace maglev {
void MaglevGraphBuilder::VisitLdar() { SetAccumulator(LoadRegister(0)); }
void MaglevGraphBuilder::VisitLdaZero() {
SetAccumulator(AddNewNode<SmiConstant>({}, Smi::zero()));
}
void MaglevGraphBuilder::VisitLdaSmi() {
Smi constant = Smi::FromInt(iterator_.GetImmediateOperand(0));
SetAccumulator(AddNewNode<SmiConstant>({}, constant));
}
void MaglevGraphBuilder::VisitLdaUndefined() {
SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue));
}
void MaglevGraphBuilder::VisitLdaNull() {
SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kNullValue));
}
void MaglevGraphBuilder::VisitLdaTheHole() {
SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kTheHoleValue));
}
void MaglevGraphBuilder::VisitLdaTrue() {
SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kTrueValue));
}
void MaglevGraphBuilder::VisitLdaFalse() {
SetAccumulator(AddNewNode<RootConstant>({}, RootIndex::kFalseValue));
}
void MaglevGraphBuilder::VisitLdaConstant() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaContextSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaImmutableContextSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaCurrentContextSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
UNREACHABLE();
}
void MaglevGraphBuilder::VisitStar() {
StoreRegister(
iterator_.GetRegisterOperand(0), GetAccumulator(),
bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
}
void MaglevGraphBuilder::VisitMov() {
StoreRegister(
iterator_.GetRegisterOperand(1), LoadRegister(0),
bytecode_analysis().GetOutLivenessFor(iterator_.current_offset()));
}
void MaglevGraphBuilder::VisitPushContext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitPopContext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestReferenceEqual() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestUndetectable() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestNull() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestUndefined() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestTypeOf() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaGlobal() {
// LdaGlobal <name_index> <slot>
static const int kNameOperandIndex = 0;
static const int kSlotOperandIndex = 1;
compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
FeedbackSlot slot_index = GetSlotOperand(kSlotOperandIndex);
ValueNode* context = GetContext();
USE(slot_index); // TODO(v8:7700): Use the feedback info.
SetAccumulator(AddNewNode<LoadGlobal>({context}, name));
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitLdaGlobalInsideTypeof() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaGlobal() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaContextSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaCurrentContextSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaLookupSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaLookupContextSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaLookupGlobalSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaLookupSlotInsideTypeof() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaLookupContextSlotInsideTypeof() {
UNREACHABLE();
}
void MaglevGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
UNREACHABLE();
}
void MaglevGraphBuilder::VisitStaLookupSlot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaNamedProperty() {
// LdaNamedProperty <object> <name_index> <slot>
ValueNode* object = LoadRegister(0);
// TODO(leszeks): Use JSHeapBroker here.
FeedbackNexus nexus(feedback().object() /* TODO(v8:7700) */,
GetSlotOperand(2));
if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
EnsureCheckpoint();
AddNewNode<SoftDeopt>({});
}
if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
std::vector<MapAndHandler> maps_and_handlers;
nexus.ExtractMapsAndHandlers(&maps_and_handlers);
DCHECK_EQ(maps_and_handlers.size(), 1);
MapAndHandler& map_and_handler = maps_and_handlers[0];
if (map_and_handler.second->IsSmi()) {
int handler = map_and_handler.second->ToSmi().value();
LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);
if (kind == LoadHandler::Kind::kField &&
!LoadHandler::IsWasmStructBits::decode(handler)) {
EnsureCheckpoint();
AddNewNode<CheckMaps>({object},
MakeRef(broker(), map_and_handler.first));
SetAccumulator(AddNewNode<LoadField>({object}, handler));
return;
}
}
}
compiler::NameRef name = GetRefOperand<Name>(1);
SetAccumulator(AddNewNode<LoadNamedGeneric>({object}, name));
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitLdaNamedPropertyFromSuper() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaKeyedProperty() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLdaModuleVariable() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaModuleVariable() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaNamedProperty() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaNamedOwnProperty() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaKeyedProperty() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaKeyedPropertyAsDefine() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaInArrayLiteral() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitStaDataPropertyInLiteral() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCollectTypeProfile() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitAdd() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitSub() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitMul() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitDiv() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitMod() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitExp() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitBitwiseOr() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitBitwiseXor() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitBitwiseAnd() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitShiftLeft() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitShiftRight() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitShiftRightLogical() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitAddSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitSubSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitMulSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitDivSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitModSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitExpSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitBitwiseOrSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitBitwiseXorSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitBitwiseAndSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitShiftLeftSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitShiftRightSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitShiftRightLogicalSmi() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitInc() {
// Inc <slot>
FeedbackSlot slot_index = GetSlotOperand(0);
ValueNode* value = GetAccumulator();
ValueNode* node = AddNewNode<Increment>(
{value}, compiler::FeedbackSource{feedback(), slot_index});
SetAccumulator(node);
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitDec() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitNegate() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitBitwiseNot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitToBooleanLogicalNot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitLogicalNot() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTypeOf() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitDeletePropertyStrict() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitDeletePropertySloppy() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitGetSuperConstructor() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallAnyReceiver() { UNREACHABLE(); }
// TODO(leszeks): For all of these:
// a) Read feedback and implement inlining
// b) Wrap in a helper.
void MaglevGraphBuilder::VisitCallProperty() {
ValueNode* function = LoadRegister(0);
interpreter::RegisterList args = iterator_.GetRegisterListOperand(1);
ValueNode* context = GetContext();
static constexpr int kTheContext = 1;
CallProperty* call_property = AddNewNode<CallProperty>(
args.register_count() + kTheContext, function, context);
// TODO(leszeks): Move this for loop into the CallProperty constructor,
// pre-size the args array.
for (int i = 0; i < args.register_count(); ++i) {
call_property->set_arg(i, current_interpreter_frame_.get(args[i]));
}
SetAccumulator(call_property);
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitCallProperty0() {
ValueNode* function = LoadRegister(0);
ValueNode* context = GetContext();
CallProperty* call_property =
AddNewNode<CallProperty>({function, context, LoadRegister(1)});
SetAccumulator(call_property);
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitCallProperty1() {
ValueNode* function = LoadRegister(0);
ValueNode* context = GetContext();
CallProperty* call_property = AddNewNode<CallProperty>(
{function, context, LoadRegister(1), LoadRegister(2)});
SetAccumulator(call_property);
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitCallProperty2() {
ValueNode* function = LoadRegister(0);
ValueNode* context = GetContext();
CallProperty* call_property = AddNewNode<CallProperty>(
{function, context, LoadRegister(1), LoadRegister(2), LoadRegister(3)});
SetAccumulator(call_property);
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitCallUndefinedReceiver() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallUndefinedReceiver0() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallUndefinedReceiver1() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallUndefinedReceiver2() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallWithSpread() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallRuntime() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallRuntimeForPair() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCallJSRuntime() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitInvokeIntrinsic() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitConstruct() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitConstructWithSpread() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestEqual() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestEqualStrict() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestLessThan() {
// TestLessThan <src> <slot>
ValueNode* left = LoadRegister(0);
FeedbackSlot slot_index = GetSlotOperand(1);
ValueNode* right = GetAccumulator();
USE(slot_index); // TODO(v8:7700): Use the feedback info.
ValueNode* node = AddNewNode<LessThan>(
{left, right}, compiler::FeedbackSource{feedback(), slot_index});
SetAccumulator(node);
MarkPossibleSideEffect();
}
void MaglevGraphBuilder::VisitTestGreaterThan() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestLessThanOrEqual() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestGreaterThanOrEqual() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestInstanceOf() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitTestIn() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitToName() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitToNumber() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitToNumeric() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitToObject() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitToString() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateRegExpLiteral() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateArrayLiteral() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateArrayFromIterable() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateEmptyArrayLiteral() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateObjectLiteral() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateEmptyObjectLiteral() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCloneObject() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitGetTemplateObject() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateClosure() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateBlockContext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateCatchContext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateFunctionContext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateEvalContext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateWithContext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateMappedArguments() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateUnmappedArguments() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitCreateRestParameter() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitJumpLoop() {
int target = iterator_.GetJumpTargetOffset();
BasicBlock* block =
target == iterator_.current_offset()
? FinishBlock<JumpLoop>(next_offset(), {}, &jump_targets_[target])
: FinishBlock<JumpLoop>(next_offset(), {},
jump_targets_[target].block_ptr());
merge_states_[target]->MergeLoop(*compilation_unit_,
current_interpreter_frame_, block, target);
block->set_predecessor_id(0);
}
void MaglevGraphBuilder::VisitJump() {
BasicBlock* block = FinishBlock<Jump>(
next_offset(), {}, &jump_targets_[iterator_.GetJumpTargetOffset()]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
DCHECK_LT(next_offset(), bytecode().length());
}
void MaglevGraphBuilder::VisitJumpConstant() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitJumpIfNullConstant() { VisitJumpIfNull(); }
void MaglevGraphBuilder::VisitJumpIfNotNullConstant() { VisitJumpIfNotNull(); }
void MaglevGraphBuilder::VisitJumpIfUndefinedConstant() {
VisitJumpIfUndefined();
}
void MaglevGraphBuilder::VisitJumpIfNotUndefinedConstant() {
VisitJumpIfNotUndefined();
}
void MaglevGraphBuilder::VisitJumpIfUndefinedOrNullConstant() {
VisitJumpIfUndefinedOrNull();
}
void MaglevGraphBuilder::VisitJumpIfTrueConstant() { VisitJumpIfTrue(); }
void MaglevGraphBuilder::VisitJumpIfFalseConstant() { VisitJumpIfFalse(); }
void MaglevGraphBuilder::VisitJumpIfJSReceiverConstant() {
VisitJumpIfJSReceiver();
}
void MaglevGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
VisitJumpIfToBooleanTrue();
}
void MaglevGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
VisitJumpIfToBooleanFalse();
}
void MaglevGraphBuilder::MergeIntoFrameState(BasicBlock* predecessor,
int target) {
if (merge_states_[target] == nullptr) {
DCHECK(!bytecode_analysis().IsLoopHeader(target));
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(target);
// If there's no target frame state, allocate a new one.
merge_states_[target] = zone()->New<MergePointInterpreterFrameState>(
*compilation_unit_, current_interpreter_frame_, target,
NumPredecessors(target), predecessor, liveness);
} else {
// If there already is a frame state, merge.
merge_states_[target]->Merge(*compilation_unit_, current_interpreter_frame_,
predecessor, target);
}
}
void MaglevGraphBuilder::BuildBranchIfTrue(ValueNode* node, int true_target,
int false_target) {
// TODO(verwaest): Materialize true/false in the respective environments.
if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
BasicBlock* block = FinishBlock<BranchIfTrue>(next_offset(), {node},
&jump_targets_[true_target],
&jump_targets_[false_target]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
int true_target,
int false_target) {
// TODO(verwaest): Materialize true/false in the respective environments.
if (GetOutLiveness()->AccumulatorIsLive()) SetAccumulator(node);
BasicBlock* block = FinishBlock<BranchIfToBooleanTrue>(
next_offset(), {node}, &jump_targets_[true_target],
&jump_targets_[false_target]);
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::VisitJumpIfToBooleanTrue() {
BuildBranchIfToBooleanTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
next_offset());
}
void MaglevGraphBuilder::VisitJumpIfToBooleanFalse() {
BuildBranchIfToBooleanTrue(GetAccumulator(), next_offset(),
iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::VisitJumpIfTrue() {
BuildBranchIfTrue(GetAccumulator(), iterator_.GetJumpTargetOffset(),
next_offset());
}
void MaglevGraphBuilder::VisitJumpIfFalse() {
BuildBranchIfTrue(GetAccumulator(), next_offset(),
iterator_.GetJumpTargetOffset());
}
void MaglevGraphBuilder::VisitJumpIfNull() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitJumpIfNotNull() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitJumpIfUndefined() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitJumpIfNotUndefined() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitJumpIfUndefinedOrNull() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitJumpIfJSReceiver() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitSwitchOnSmiNoFeedback() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitForInEnumerate() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitForInPrepare() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitForInContinue() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitForInNext() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitForInStep() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitSetPendingMessage() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitThrow() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitReThrow() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitReturn() {
FinishBlock<Return>(next_offset(), {GetAccumulator()});
}
void MaglevGraphBuilder::VisitThrowReferenceErrorIfHole() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitThrowSuperNotCalledIfHole() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
UNREACHABLE();
}
void MaglevGraphBuilder::VisitThrowIfNotSuperConstructor() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitSwitchOnGeneratorState() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitSuspendGenerator() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitResumeGenerator() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitGetIterator() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitDebugger() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitIncBlockCounter() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitAbort() { UNREACHABLE(); }
#define SHORT_STAR_VISITOR(Name, ...) \
void MaglevGraphBuilder::Visit##Name() { \
StoreRegister( \
interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name), \
GetAccumulator(), \
bytecode_analysis().GetOutLivenessFor(iterator_.current_offset())); \
}
SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
#undef SHORT_STAR_VISITOR
void MaglevGraphBuilder::VisitWide() { UNREACHABLE(); }
void MaglevGraphBuilder::VisitExtraWide() { UNREACHABLE(); }
#define DEBUG_BREAK(Name, ...) \
void MaglevGraphBuilder::Visit##Name() { UNREACHABLE(); }
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK)
#undef DEBUG_BREAK
void MaglevGraphBuilder::VisitIllegal() { UNREACHABLE(); }
} // namespace maglev
} // namespace internal
} // namespace v8

src/maglev/maglev-graph-builder.h
@ -0,0 +1,434 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
#define V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_
#include <type_traits>
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
namespace maglev {
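// Builds a Maglev graph from the compilation unit's bytecode. Typical usage
// (sketch): construct the builder, call Build() to walk the bytecode, then read
// the result off graph().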
class MaglevGraphBuilder {
public:
explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
: compilation_unit_(compilation_unit),
iterator_(bytecode().object()),
jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
// Overallocate merge_states_ by one to allow always looking up the
// next offset.
merge_states_(zone()->NewArray<MergePointInterpreterFrameState*>(
bytecode().length() + 1)),
graph_(zone()),
current_interpreter_frame_(*compilation_unit_) {
    memset(merge_states_, 0,
           (bytecode().length() + 1) *
               sizeof(MergePointInterpreterFrameState*));
// Default construct basic block refs.
// TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
for (int i = 0; i < bytecode().length(); ++i) {
new (&jump_targets_[i]) BasicBlockRef();
}
CalculatePredecessorCounts();
for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) {
int offset = offset_and_info.first;
const compiler::LoopInfo& loop_info = offset_and_info.second;
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(offset);
merge_states_[offset] = zone()->New<MergePointInterpreterFrameState>(
*compilation_unit_, offset, NumPredecessors(offset), liveness,
&loop_info);
}
current_block_ = zone()->New<BasicBlock>(nullptr);
block_offset_ = -1;
for (int i = 0; i < parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
}
// interpreter::Register regs[] = {interpreter::Register::current_context(),
// interpreter::Register::function_closure(),
// interpreter::Register::bytecode_array(),
// interpreter::Register::bytecode_offset()};
// for (interpreter::Register& reg : regs) {
// current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
// }
// TODO(leszeks): Extract out a separate "incoming context" node to be able
// to read in the context arg but also use the frame-spilled context var.
current_interpreter_frame_.set(
interpreter::Register::current_context(),
AddNewNode<InitialValue>({}, interpreter::Register::current_context()));
interpreter::Register new_target_or_generator_register =
bytecode().incoming_new_target_or_generator_register();
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(0);
int register_index = 0;
// TODO(leszeks): Don't emit if not needed.
ValueNode* undefined_value =
AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
if (new_target_or_generator_register.is_valid()) {
int new_target_index = new_target_or_generator_register.index();
for (; register_index < new_target_index; register_index++) {
StoreRegister(interpreter::Register(register_index), undefined_value,
liveness);
}
StoreRegister(
new_target_or_generator_register,
// TODO(leszeks): Expose in Graph.
AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister),
liveness);
register_index++;
}
for (; register_index < register_count(); register_index++) {
StoreRegister(interpreter::Register(register_index), undefined_value,
liveness);
}
BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
MergeIntoFrameState(first_block, 0);
}
void Build() {
for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
VisitSingleBytecode();
}
}
Graph* graph() { return &graph_; }
private:
BasicBlock* CreateEmptyBlock(int offset, BasicBlock* predecessor) {
DCHECK_NULL(current_block_);
current_block_ = zone()->New<BasicBlock>(nullptr);
BasicBlock* result = CreateBlock<Jump>({}, &jump_targets_[offset]);
result->set_empty_block_predecessor(predecessor);
return result;
}
void ProcessMergePoint(int offset) {
// First copy the merge state to be the current state.
MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
current_interpreter_frame_.CopyFrom(*compilation_unit_, merge_state);
if (merge_state.predecessor_count() == 1) return;
// Set up edge-split.
int predecessor_index = merge_state.predecessor_count() - 1;
BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
while (old_jump_targets != nullptr) {
BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
ControlNode* control = predecessor->control_node();
if (control->Is<ConditionalControlNode>()) {
// CreateEmptyBlock automatically registers itself with the offset.
predecessor = CreateEmptyBlock(offset, predecessor);
// Set the old predecessor's (the conditional block) reference to
// point to the new empty predecessor block.
old_jump_targets =
old_jump_targets->SetToBlockAndReturnNext(predecessor);
} else {
// Re-register the block in the offset's ref list.
old_jump_targets =
old_jump_targets->MoveToRefList(&jump_targets_[offset]);
}
predecessor->set_predecessor_id(predecessor_index--);
}
#ifdef DEBUG
if (bytecode_analysis().IsLoopHeader(offset)) {
// For loops, the JumpLoop block hasn't been generated yet, and so isn't
// in the list of jump targets. It's defined to be at index 0, so once
// we've processed all the jump targets, the 0 index should be the one
// remaining.
DCHECK_EQ(predecessor_index, 0);
} else {
DCHECK_EQ(predecessor_index, -1);
}
#endif
if (has_graph_labeller()) {
for (Phi* phi : *merge_states_[offset]->phis()) {
graph_labeller()->RegisterNode(phi);
}
}
}
void VisitSingleBytecode() {
int offset = iterator_.current_offset();
if (V8_UNLIKELY(merge_states_[offset] != nullptr)) {
if (current_block_ != nullptr) {
DCHECK(!current_block_->nodes().is_empty());
FinishBlock<Jump>(offset, {}, &jump_targets_[offset]);
merge_states_[offset]->Merge(*compilation_unit_,
current_interpreter_frame_,
graph_.last_block(), offset);
}
ProcessMergePoint(offset);
StartNewBlock(offset);
}
DCHECK_NOT_NULL(current_block_);
switch (iterator_.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(); \
break;
BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
}
}
#define BYTECODE_VISITOR(name, ...) void Visit##name();
BYTECODE_LIST(BYTECODE_VISITOR)
#undef BYTECODE_VISITOR
template <typename NodeT>
NodeT* AddNode(NodeT* node) {
current_block_->nodes().Add(node);
return node;
}
template <typename NodeT, typename... Args>
NodeT* NewNode(size_t input_count, Args&&... args) {
NodeT* node =
Node::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
return node;
}
template <typename NodeT, typename... Args>
NodeT* AddNewNode(size_t input_count, Args&&... args) {
return AddNode(NewNode<NodeT>(input_count, std::forward<Args>(args)...));
}
template <typename NodeT, typename... Args>
NodeT* NewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
NodeT* node = Node::New<NodeT>(zone(), inputs, std::forward<Args>(args)...);
if (has_graph_labeller()) graph_labeller()->RegisterNode(node);
return node;
}
template <typename NodeT, typename... Args>
NodeT* AddNewNode(std::initializer_list<ValueNode*> inputs, Args&&... args) {
return AddNode(NewNode<NodeT>(inputs, std::forward<Args>(args)...));
}
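  // For example (illustrative), AddNewNode<LessThan>({left, right}, feedback)
  // allocates a LessThan node with the given value inputs, registers it with
  // the graph labeller (if any), and appends it to the current basic block.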
ValueNode* GetContext() const {
return current_interpreter_frame_.get(
interpreter::Register::current_context());
}
FeedbackSlot GetSlotOperand(int operand_index) {
return iterator_.GetSlotOperand(operand_index);
}
template <class T, typename = std::enable_if_t<
std::is_convertible<T*, Object*>::value>>
typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) {
return MakeRef(broker(),
Handle<T>::cast(iterator_.GetConstantForIndexOperand(
operand_index, isolate())));
}
void SetAccumulator(ValueNode* node) {
current_interpreter_frame_.set_accumulator(node);
}
ValueNode* GetAccumulator() const {
return current_interpreter_frame_.accumulator();
}
ValueNode* LoadRegister(int operand_index) {
interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
return current_interpreter_frame_.get(source);
}
void StoreRegister(interpreter::Register target, ValueNode* value,
const compiler::BytecodeLivenessState* liveness) {
if (target.index() >= 0 && !liveness->RegisterIsLive(target.index())) {
return;
}
current_interpreter_frame_.set(target, value);
AddNewNode<StoreToFrame>({}, value, target);
}
void AddCheckpoint() {
// TODO(v8:7700): Verify this calls the initializer list overload.
AddNewNode<Checkpoint>({}, iterator_.current_offset(),
GetInLiveness()->AccumulatorIsLive(),
GetAccumulator());
has_valid_checkpoint_ = true;
}
void EnsureCheckpoint() {
if (!has_valid_checkpoint_) AddCheckpoint();
}
void MarkPossibleSideEffect() {
// If there was a potential side effect, invalidate the previous checkpoint.
has_valid_checkpoint_ = false;
}
int next_offset() const {
return iterator_.current_offset() + iterator_.current_bytecode_size();
}
const compiler::BytecodeLivenessState* GetInLiveness() const {
return bytecode_analysis().GetInLivenessFor(iterator_.current_offset());
}
const compiler::BytecodeLivenessState* GetOutLiveness() const {
return bytecode_analysis().GetOutLivenessFor(iterator_.current_offset());
}
void StartNewBlock(int offset) {
DCHECK_NULL(current_block_);
current_block_ = zone()->New<BasicBlock>(merge_states_[offset]);
block_offset_ = offset;
}
template <typename ControlNodeT, typename... Args>
BasicBlock* CreateBlock(std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
current_block_->set_control_node(NodeBase::New<ControlNodeT>(
zone(), control_inputs, std::forward<Args>(args)...));
BasicBlock* block = current_block_;
current_block_ = nullptr;
graph_.Add(block);
if (has_graph_labeller()) {
graph_labeller()->RegisterBasicBlock(block);
}
return block;
}
template <typename ControlNodeT, typename... Args>
BasicBlock* FinishBlock(int next_block_offset,
std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
BasicBlock* block =
CreateBlock<ControlNodeT>(control_inputs, std::forward<Args>(args)...);
// Resolve pointers to this basic block.
BasicBlockRef* jump_target_refs_head =
jump_targets_[block_offset_].SetToBlockAndReturnNext(block);
while (jump_target_refs_head != nullptr) {
jump_target_refs_head =
jump_target_refs_head->SetToBlockAndReturnNext(block);
}
DCHECK_EQ(jump_targets_[block_offset_].block_ptr(), block);
// If the next block has merge states, then it's not a simple fallthrough,
// and we should reset the checkpoint validity.
if (merge_states_[next_block_offset] != nullptr) {
has_valid_checkpoint_ = false;
}
// Start a new block for the fallthrough path, unless it's a merge point, in
// which case we merge our state into it. That merge-point could also be a
// loop header, in which case the merge state might not exist yet (if the
// only predecessors are this path and the JumpLoop).
if (std::is_base_of<ConditionalControlNode, ControlNodeT>::value) {
if (NumPredecessors(next_block_offset) == 1) {
StartNewBlock(next_block_offset);
} else {
DCHECK_NULL(current_block_);
MergeIntoFrameState(block, next_block_offset);
}
}
return block;
}
void MergeIntoFrameState(BasicBlock* block, int target);
void BuildBranchIfTrue(ValueNode* node, int true_target, int false_target);
void BuildBranchIfToBooleanTrue(ValueNode* node, int true_target,
int false_target);
void CalculatePredecessorCounts() {
// Add 1 after the end of the bytecode so we can always write to the offset
// after the last bytecode.
size_t array_length = bytecode().length() + 1;
predecessors_ = zone()->NewArray<uint32_t>(array_length);
MemsetUint32(predecessors_, 1, array_length);
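    // Every offset starts with a single (fallthrough) predecessor; the loop
    // below adds an extra predecessor at each jump target, and removes the
    // fallthrough edge after unconditional jumps, returns and throws.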
interpreter::BytecodeArrayIterator iterator(bytecode().object());
for (; !iterator.done(); iterator.Advance()) {
interpreter::Bytecode bytecode = iterator.current_bytecode();
if (interpreter::Bytecodes::IsJump(bytecode)) {
predecessors_[iterator.GetJumpTargetOffset()]++;
if (!interpreter::Bytecodes::IsConditionalJump(bytecode)) {
predecessors_[iterator.next_offset()]--;
}
} else if (interpreter::Bytecodes::IsSwitch(bytecode)) {
for (auto offset : iterator.GetJumpTableTargetOffsets()) {
predecessors_[offset.target_offset]++;
}
} else if (interpreter::Bytecodes::Returns(bytecode) ||
interpreter::Bytecodes::UnconditionallyThrows(bytecode)) {
predecessors_[iterator.next_offset()]--;
}
      // TODO(leszeks): Also consider handler entries; the bytecode analysis
      // would handle this automatically if we merged this loop into it.
}
DCHECK_EQ(0, predecessors_[bytecode().length()]);
}
int NumPredecessors(int offset) { return predecessors_[offset]; }
compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
const compiler::FeedbackVectorRef& feedback() const {
return compilation_unit_->feedback;
}
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode;
}
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis;
}
Isolate* isolate() const { return compilation_unit_->isolate(); }
Zone* zone() const { return compilation_unit_->zone(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
int register_count() const { return compilation_unit_->register_count(); }
bool has_graph_labeller() const {
return compilation_unit_->has_graph_labeller();
}
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
}
MaglevCompilationUnit* const compilation_unit_;
interpreter::BytecodeArrayIterator iterator_;
uint32_t* predecessors_;
// Current block information.
BasicBlock* current_block_ = nullptr;
int block_offset_ = 0;
bool has_valid_checkpoint_ = false;
BasicBlockRef* jump_targets_;
MergePointInterpreterFrameState** merge_states_;
Graph graph_;
InterpreterFrameState current_interpreter_frame_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_BUILDER_H_

src/maglev/maglev-graph-labeller.h
@ -0,0 +1,65 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
#define V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
#include <map>
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
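// Assigns sequential ids to nodes and basic blocks so that the printer can
// refer to them with stable labels (e.g. "n12", "Block b3").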
class MaglevGraphLabeller {
public:
void RegisterNode(const Node* node) {
if (node_ids_.emplace(node, next_node_id_).second) {
next_node_id_++;
}
}
void RegisterBasicBlock(const BasicBlock* block) {
block_ids_[block] = next_block_id_++;
if (node_ids_.emplace(block->control_node(), next_node_id_).second) {
next_node_id_++;
}
}
int BlockId(const BasicBlock* block) { return block_ids_[block]; }
int NodeId(const NodeBase* node) { return node_ids_[node]; }
int max_node_id() const { return next_node_id_ - 1; }
int max_node_id_width() const { return std::ceil(std::log10(max_node_id())); }
void PrintNodeLabel(std::ostream& os, const Node* node) {
auto node_id_it = node_ids_.find(node);
if (node_id_it == node_ids_.end()) {
os << "<invalid node " << node << ">";
return;
}
os << "n" << node_id_it->second;
}
void PrintInput(std::ostream& os, const Input& input) {
PrintNodeLabel(os, input.node());
os << ":" << input.operand();
}
private:
std::map<const BasicBlock*, int> block_ids_;
std::map<const NodeBase*, int> node_ids_;
int next_block_id_ = 1;
int next_node_id_ = 1;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_

src/maglev/maglev-graph-printer.cc
@ -0,0 +1,446 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-graph-printer.h"
#include <initializer_list>
#include <iomanip>
#include <ostream>
#include <type_traits>
#include <vector>
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
namespace {
void PrintPaddedId(std::ostream& os, MaglevGraphLabeller* graph_labeller,
NodeBase* node, std::string padding = " ",
int padding_adjustement = 0) {
int id = graph_labeller->NodeId(node);
int id_width = std::ceil(std::log10(id + 1));
int max_width = graph_labeller->max_node_id_width() + 2 + padding_adjustement;
int padding_width = std::max(0, max_width - id_width);
for (int i = 0; i < padding_width; ++i) {
os << padding;
}
os << graph_labeller->NodeId(node) << ": ";
}
void PrintPadding(std::ostream& os, int size) {
os << std::setfill(' ') << std::setw(size) << "";
}
void PrintPadding(std::ostream& os, MaglevGraphLabeller* graph_labeller,
int padding_adjustement = 0) {
PrintPadding(os,
graph_labeller->max_node_id_width() + 2 + padding_adjustement);
}
enum ConnectionLocation {
kTop = 1 << 0,
kLeft = 1 << 1,
kRight = 1 << 2,
kBottom = 1 << 3
};
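// A Connection records which edges of a character cell are connected to a
// neighbouring cell, and renders as the matching box-drawing character
// (e.g. kTop | kBottom is "│", kLeft | kRight is "─").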
struct Connection {
void Connect(ConnectionLocation loc) { connected |= loc; }
void AddHorizontal() {
Connect(kLeft);
Connect(kRight);
}
void AddVertical() {
Connect(kTop);
Connect(kBottom);
}
const char* ToString() const {
switch (connected) {
case 0:
return " ";
      case kTop:
        return "╵";
      case kLeft:
        return "╴";
      case kRight:
        return "╶";
      case kBottom:
        return "╷";
      case kTop | kLeft:
        return "╯";
      case kTop | kRight:
        return "╰";
      case kBottom | kLeft:
        return "╮";
      case kBottom | kRight:
        return "╭";
      case kTop | kBottom:
        return "│";
      case kLeft | kRight:
        return "─";
      case kTop | kBottom | kLeft:
        return "┤";
      case kTop | kBottom | kRight:
        return "├";
      case kLeft | kRight | kTop:
        return "┴";
      case kLeft | kRight | kBottom:
        return "┬";
      case kTop | kLeft | kRight | kBottom:
        return "┼";
}
UNREACHABLE();
}
uint8_t connected = 0;
};
std::ostream& operator<<(std::ostream& os, const Connection& c) {
return os << c.ToString();
}
// Print the vertical parts of connection arrows, optionally connecting arrows
// that were only first created on this line (passed in "arrows_starting_here")
// and should therefore connect rightwards instead of upwards.
void PrintVerticalArrows(
std::ostream& os, const std::vector<BasicBlock*>& targets,
const std::set<size_t>& arrows_starting_here = {},
const std::set<BasicBlock*>& targets_starting_here = {},
bool is_loop = false) {
bool saw_start = false;
for (size_t i = 0; i < targets.size(); ++i) {
Connection c;
if (saw_start) {
c.AddHorizontal();
}
if (arrows_starting_here.find(i) != arrows_starting_here.end() ||
targets_starting_here.find(targets[i]) != targets_starting_here.end()) {
c.Connect(kRight);
c.Connect(is_loop ? kTop : kBottom);
saw_start = true;
}
// Only add the vertical connection if there was no other connection.
if (c.connected == 0 && targets[i] != nullptr) {
c.AddVertical();
}
os << c;
}
}
// Add a target to the target list, reusing the first free (null) slot at the
// end of the list, or extending the list if its last slot is already occupied.
size_t AddTarget(std::vector<BasicBlock*>& targets, BasicBlock* target) {
if (targets.size() == 0 || targets.back() != nullptr) {
targets.push_back(target);
return targets.size() - 1;
}
size_t i = targets.size();
while (i > 0) {
if (targets[i - 1] != nullptr) break;
i--;
}
targets[i] = target;
return i;
}
// If the target is not the fallthrough block, add it to the target list in the
// first free slot from the end (possibly extending the list) and record the
// used index in arrows_starting_here, if given. Returns true if it was added,
// false if it was a fallthrough.
bool AddTargetIfNotNext(std::vector<BasicBlock*>& targets, BasicBlock* target,
BasicBlock* next_block,
std::set<size_t>* arrows_starting_here = nullptr) {
if (next_block == target) return false;
size_t index = AddTarget(targets, target);
if (arrows_starting_here != nullptr) arrows_starting_here->insert(index);
return true;
}
class MaglevPrintingVisitorOstream : public std::ostream,
private std::streambuf {
public:
MaglevPrintingVisitorOstream(std::ostream& os,
std::vector<BasicBlock*>* targets)
: std::ostream(this), os_(os), targets_(targets), padding_size_(0) {}
~MaglevPrintingVisitorOstream() override = default;
static MaglevPrintingVisitorOstream* cast(
const std::unique_ptr<std::ostream>& os) {
return static_cast<MaglevPrintingVisitorOstream*>(os.get());
}
void set_padding(int padding_size) { padding_size_ = padding_size; }
protected:
int overflow(int c) override;
private:
std::ostream& os_;
std::vector<BasicBlock*>* targets_;
int padding_size_;
bool previous_was_new_line_ = true;
};
int MaglevPrintingVisitorOstream::overflow(int c) {
if (c == EOF) return c;
if (previous_was_new_line_) {
PrintVerticalArrows(os_, *targets_);
PrintPadding(os_, padding_size_);
}
os_.rdbuf()->sputc(c);
previous_was_new_line_ = (c == '\n');
return c;
}
} // namespace
MaglevPrintingVisitor::MaglevPrintingVisitor(std::ostream& os)
: os_(os),
os_for_additional_info_(new MaglevPrintingVisitorOstream(os_, &targets)) {
}
void MaglevPrintingVisitor::PreProcessGraph(
MaglevCompilationUnit* compilation_unit, Graph* graph) {
os_ << "Graph (param count: " << compilation_unit->parameter_count()
<< ", frame size: " << compilation_unit->register_count() << ")\n\n";
for (BasicBlock* block : *graph) {
if (block->control_node()->Is<JumpLoop>()) {
loop_headers.insert(block->control_node()->Cast<JumpLoop>()->target());
}
}
// Precalculate the maximum number of targets.
for (BlockConstIterator block_it = graph->begin(); block_it != graph->end();
++block_it) {
BasicBlock* block = *block_it;
std::replace(targets.begin(), targets.end(), block,
static_cast<BasicBlock*>(nullptr));
if (loop_headers.find(block) != loop_headers.end()) {
AddTarget(targets, block);
}
ControlNode* node = block->control_node();
if (node->Is<JumpLoop>()) {
BasicBlock* target = node->Cast<JumpLoop>()->target();
std::replace(targets.begin(), targets.end(), target,
static_cast<BasicBlock*>(nullptr));
} else if (node->Is<UnconditionalControlNode>()) {
AddTargetIfNotNext(targets,
node->Cast<UnconditionalControlNode>()->target(),
*(block_it + 1));
} else if (node->Is<ConditionalControlNode>()) {
AddTargetIfNotNext(targets,
node->Cast<ConditionalControlNode>()->if_true(),
*(block_it + 1));
AddTargetIfNotNext(targets,
node->Cast<ConditionalControlNode>()->if_false(),
*(block_it + 1));
}
}
DCHECK(std::all_of(targets.begin(), targets.end(),
[](BasicBlock* block) { return block == nullptr; }));
}
void MaglevPrintingVisitor::PreProcessBasicBlock(
MaglevCompilationUnit* compilation_unit, BasicBlock* block) {
MaglevGraphLabeller* graph_labeller = compilation_unit->graph_labeller();
size_t loop_position = static_cast<size_t>(-1);
if (loop_headers.erase(block) > 0) {
loop_position = AddTarget(targets, block);
}
{
bool saw_start = false;
for (size_t i = 0; i < targets.size(); ++i) {
Connection c;
if (saw_start) {
c.AddHorizontal();
}
// If this is one of the arrows pointing to this block, terminate the
// line by connecting it rightwards.
if (targets[i] == block) {
c.Connect(kRight);
// If this is the loop header, go down instead of up and don't clear
// the target.
if (i == loop_position) {
c.Connect(kBottom);
} else {
c.Connect(kTop);
targets[i] = nullptr;
}
saw_start = true;
} else if (c.connected == 0 && targets[i] != nullptr) {
// If this is another arrow, connect it, but only if that doesn't
// clobber any existing drawing.
c.AddVertical();
}
os_ << c;
}
    os_ << (saw_start ? "►" : " ");
}
int block_id = graph_labeller->BlockId(block);
os_ << "Block b" << block_id << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)->set_padding(1);
}
void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
PrintVerticalArrows(os_, targets);
PrintPaddedId(os_, graph_labeller, phi);
os_ << "Phi (";
// Manually walk Phi inputs to print just the node labels, without
// input locations (which are shown in the predecessor block's gap
// moves).
for (int i = 0; i < phi->input_count(); ++i) {
if (i > 0) os_ << ", ";
os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
}
os_ << ") → " << phi->result().operand() << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
->set_padding(graph_labeller->max_node_id_width() + 4);
}
void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
PrintVerticalArrows(os_, targets);
PrintPaddedId(os_, graph_labeller, node);
os_ << PrintNode(graph_labeller, node) << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
->set_padding(graph_labeller->max_node_id_width() + 4);
}
void MaglevPrintingVisitor::Process(ControlNode* control_node,
const ProcessingState& state) {
MaglevGraphLabeller* graph_labeller = state.graph_labeller();
bool has_fallthrough = false;
if (control_node->Is<JumpLoop>()) {
BasicBlock* target = control_node->Cast<JumpLoop>()->target();
PrintVerticalArrows(os_, targets, {}, {target}, true);
os_ << "◄─";
    PrintPaddedId(os_, graph_labeller, control_node, "─", -2);
std::replace(targets.begin(), targets.end(), target,
static_cast<BasicBlock*>(nullptr));
} else if (control_node->Is<UnconditionalControlNode>()) {
BasicBlock* target =
control_node->Cast<UnconditionalControlNode>()->target();
std::set<size_t> arrows_starting_here;
has_fallthrough |= !AddTargetIfNotNext(targets, target, state.next_block(),
&arrows_starting_here);
PrintVerticalArrows(os_, targets, arrows_starting_here);
    PrintPaddedId(os_, graph_labeller, control_node,
                  has_fallthrough ? " " : "─");
} else if (control_node->Is<ConditionalControlNode>()) {
BasicBlock* true_target =
control_node->Cast<ConditionalControlNode>()->if_true();
BasicBlock* false_target =
control_node->Cast<ConditionalControlNode>()->if_false();
std::set<size_t> arrows_starting_here;
has_fallthrough |= !AddTargetIfNotNext(
targets, false_target, state.next_block(), &arrows_starting_here);
has_fallthrough |= !AddTargetIfNotNext(
targets, true_target, state.next_block(), &arrows_starting_here);
PrintVerticalArrows(os_, targets, arrows_starting_here);
    PrintPaddedId(os_, graph_labeller, control_node, "─");
} else {
PrintVerticalArrows(os_, targets);
PrintPaddedId(os_, graph_labeller, control_node);
}
os_ << PrintNode(graph_labeller, control_node) << "\n";
bool printed_phis = false;
if (control_node->Is<UnconditionalControlNode>()) {
BasicBlock* target =
control_node->Cast<UnconditionalControlNode>()->target();
if (target->has_phi()) {
printed_phis = true;
PrintVerticalArrows(os_, targets);
PrintPadding(os_, graph_labeller, -1);
      os_ << (has_fallthrough ? "│" : " ");
os_ << " with gap moves:\n";
int pid = state.block()->predecessor_id();
for (Phi* phi : *target->phis()) {
PrintVerticalArrows(os_, targets);
PrintPadding(os_, graph_labeller, -1);
        os_ << (has_fallthrough ? "│" : " ");
os_ << " - ";
graph_labeller->PrintInput(os_, phi->input(pid));
os_ << "" << graph_labeller->NodeId(phi) << ": Phi "
<< phi->result().operand() << "\n";
}
}
}
PrintVerticalArrows(os_, targets);
if (has_fallthrough) {
PrintPadding(os_, graph_labeller, -1);
    if (printed_phis) {
      os_ << "▼";
    } else {
      os_ << "↓";
    }
}
os_ << "\n";
// TODO(leszeks): Allow MaglevPrintingVisitorOstream to print the arrowhead
// so that it overlaps the fallthrough arrow.
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
->set_padding(graph_labeller->max_node_id_width() + 4);
}
void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
Graph* const graph) {
GraphProcessor<MaglevPrintingVisitor> printer(compilation_unit, os);
printer.ProcessGraph(graph);
}
void PrintNode::Print(std::ostream& os) const {
node_->Print(os, graph_labeller_);
}
std::ostream& operator<<(std::ostream& os, const PrintNode& printer) {
printer.Print(os);
return os;
}
void PrintNodeLabel::Print(std::ostream& os) const {
graph_labeller_->PrintNodeLabel(os, node_);
}
std::ostream& operator<<(std::ostream& os, const PrintNodeLabel& printer) {
printer.Print(os);
return os;
}
} // namespace maglev
} // namespace internal
} // namespace v8

src/maglev/maglev-graph-printer.h
@ -0,0 +1,84 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
#define V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
#include <ostream>
#include <set>
#include <vector>
namespace v8 {
namespace internal {
namespace maglev {
class BasicBlock;
class ControlNode;
class Graph;
struct MaglevCompilationUnit;
class MaglevGraphLabeller;
class NodeBase;
class Node;
class Phi;
class ProcessingState;
class MaglevPrintingVisitor {
public:
// Could be interesting to print checkpoints too.
static constexpr bool kNeedsCheckpointStates = false;
explicit MaglevPrintingVisitor(std::ostream& os);
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block);
void Process(Phi* phi, const ProcessingState& state);
void Process(Node* node, const ProcessingState& state);
void Process(ControlNode* node, const ProcessingState& state);
std::ostream& os() { return *os_for_additional_info_; }
private:
std::ostream& os_;
std::unique_ptr<std::ostream> os_for_additional_info_;
std::set<BasicBlock*> loop_headers;
std::vector<BasicBlock*> targets;
};
void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
Graph* const graph);
class PrintNode {
public:
PrintNode(MaglevGraphLabeller* graph_labeller, const NodeBase* node)
: graph_labeller_(graph_labeller), node_(node) {}
void Print(std::ostream& os) const;
private:
MaglevGraphLabeller* graph_labeller_;
const NodeBase* node_;
};
std::ostream& operator<<(std::ostream& os, const PrintNode& printer);
class PrintNodeLabel {
public:
PrintNodeLabel(MaglevGraphLabeller* graph_labeller, const Node* node)
: graph_labeller_(graph_labeller), node_(node) {}
void Print(std::ostream& os) const;
private:
MaglevGraphLabeller* graph_labeller_;
const Node* node_;
};
std::ostream& operator<<(std::ostream& os, const PrintNodeLabel& printer);
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_

src/maglev/maglev-graph-processor.h
@ -0,0 +1,423 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
#define V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
#include "src/compiler/bytecode-analysis.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
// The GraphProcessor takes a NodeProcessor, and applies it to each Node in the
// Graph by calling NodeProcessor::Process on each Node.
//
// The GraphProcessor also keeps track of the current ProcessingState, including
// the inferred corresponding InterpreterFrameState and (optionally) the state
// at the most recent Checkpoint, and passes this to the Process method.
//
// It expects a NodeProcessor class with:
//
// // True if the GraphProcessor should snapshot Checkpoint states for
// // deopting nodes.
// static constexpr bool kNeedsCheckpointStates;
//
// // A function that processes the graph before the nodes are walked.
// void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
//
// // A function that processes the graph after the nodes are walked.
// void PostProcessGraph(MaglevCompilationUnit*, Graph* graph);
//
// // A function that processes each basic block before its nodes are walked.
// void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block);
//
// // Process methods for each Node type. The GraphProcessor switches over
// // the Node's opcode, casts it to the appropriate FooNode, and dispatches
// // to NodeProcessor::Process. It's then up to the NodeProcessor to provide
// // either distinct Process methods per Node type, or using templates or
// // overloading as appropriate to group node processing.
// void Process(FooNode* node, const ProcessingState& state) {}
//
template <typename NodeProcessor>
class GraphProcessor;
class ProcessingState {
public:
explicit ProcessingState(MaglevCompilationUnit* compilation_unit,
BlockConstIterator block_it,
const InterpreterFrameState* interpreter_frame_state,
const Checkpoint* checkpoint,
const InterpreterFrameState* checkpoint_frame_state)
: compilation_unit_(compilation_unit),
block_it_(block_it),
interpreter_frame_state_(interpreter_frame_state),
checkpoint_(checkpoint),
checkpoint_frame_state_(checkpoint_frame_state) {}
// Disallow copies, since the underlying frame states stay mutable.
ProcessingState(const ProcessingState&) = delete;
ProcessingState& operator=(const ProcessingState&) = delete;
BasicBlock* block() const { return *block_it_; }
BasicBlock* next_block() const { return *(block_it_ + 1); }
const InterpreterFrameState* interpreter_frame_state() const {
DCHECK_NOT_NULL(interpreter_frame_state_);
return interpreter_frame_state_;
}
const Checkpoint* checkpoint() const {
DCHECK_NOT_NULL(checkpoint_);
return checkpoint_;
}
const InterpreterFrameState* checkpoint_frame_state() const {
DCHECK_NOT_NULL(checkpoint_frame_state_);
return checkpoint_frame_state_;
}
int register_count() const { return compilation_unit_->register_count(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
}
private:
MaglevCompilationUnit* compilation_unit_;
BlockConstIterator block_it_;
const InterpreterFrameState* interpreter_frame_state_;
const Checkpoint* checkpoint_;
const InterpreterFrameState* checkpoint_frame_state_;
};
template <typename NodeProcessor>
class GraphProcessor {
public:
static constexpr bool kNeedsCheckpointStates =
NodeProcessor::kNeedsCheckpointStates;
template <typename... Args>
explicit GraphProcessor(MaglevCompilationUnit* compilation_unit,
Args&&... args)
: compilation_unit_(compilation_unit),
node_processor_(std::forward<Args>(args)...),
current_frame_state_(*compilation_unit_) {
if (kNeedsCheckpointStates) {
checkpoint_state_.emplace(*compilation_unit_);
}
}
void ProcessGraph(Graph* graph) {
graph_ = graph;
node_processor_.PreProcessGraph(compilation_unit_, graph);
for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
BasicBlock* block = *block_it_;
node_processor_.PreProcessBasicBlock(compilation_unit_, block);
if (block->has_state()) {
current_frame_state_.CopyFrom(*compilation_unit_, *block->state());
if (kNeedsCheckpointStates) {
checkpoint_state_->last_checkpoint_block_it = block_it_;
checkpoint_state_->last_checkpoint_node_it = NodeConstIterator();
}
}
if (block->has_phi()) {
for (Phi* phi : *block->phis()) {
node_processor_.Process(phi, GetCurrentState());
}
}
for (node_it_ = block->nodes().begin(); node_it_ != block->nodes().end();
++node_it_) {
Node* node = *node_it_;
ProcessNodeBase(node, GetCurrentState());
}
ProcessNodeBase(block->control_node(), GetCurrentState());
}
node_processor_.PostProcessGraph(compilation_unit_, graph);
}
NodeProcessor& node_processor() { return node_processor_; }
const NodeProcessor& node_processor() const { return node_processor_; }
private:
ProcessingState GetCurrentState() {
return ProcessingState(
compilation_unit_, block_it_, &current_frame_state_,
kNeedsCheckpointStates ? checkpoint_state_->latest_checkpoint : nullptr,
kNeedsCheckpointStates ? &checkpoint_state_->checkpoint_frame_state
: nullptr);
}
void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
switch (node->opcode()) {
#define CASE(OPCODE) \
case Opcode::k##OPCODE: \
PreProcess(node->Cast<OPCODE>(), state); \
node_processor_.Process(node->Cast<OPCODE>(), state); \
break;
NODE_BASE_LIST(CASE)
#undef CASE
}
}
void PreProcess(NodeBase* node, const ProcessingState& state) {}
void PreProcess(Checkpoint* checkpoint, const ProcessingState& state) {
current_frame_state_.set_accumulator(checkpoint->accumulator());
if (kNeedsCheckpointStates) {
checkpoint_state_->latest_checkpoint = checkpoint;
if (checkpoint->is_used()) {
checkpoint_state_->checkpoint_frame_state.CopyFrom(
*compilation_unit_, current_frame_state_);
checkpoint_state_->last_checkpoint_block_it = block_it_;
checkpoint_state_->last_checkpoint_node_it = node_it_;
ClearDeadCheckpointNodes();
}
}
}
void PreProcess(StoreToFrame* store_to_frame, const ProcessingState& state) {
current_frame_state_.set(store_to_frame->target(), store_to_frame->value());
}
void PreProcess(SoftDeopt* node, const ProcessingState& state) {
PreProcessDeoptingNode();
}
void PreProcess(CheckMaps* node, const ProcessingState& state) {
PreProcessDeoptingNode();
}
void PreProcessDeoptingNode() {
if (!kNeedsCheckpointStates) return;
Checkpoint* checkpoint = checkpoint_state_->latest_checkpoint;
if (checkpoint->is_used()) {
DCHECK(!checkpoint_state_->last_checkpoint_node_it.is_null());
DCHECK_EQ(checkpoint, *checkpoint_state_->last_checkpoint_node_it);
return;
}
DCHECK_IMPLIES(!checkpoint_state_->last_checkpoint_node_it.is_null(),
checkpoint != *checkpoint_state_->last_checkpoint_node_it);
// TODO(leszeks): The following code is _ugly_, should figure out how to
// clean it up.
// Go to the previous state checkpoint (either on the Checkpoint that
// provided the current checkpoint snapshot, or on a BasicBlock).
BlockConstIterator block_it = checkpoint_state_->last_checkpoint_block_it;
NodeConstIterator node_it = checkpoint_state_->last_checkpoint_node_it;
if (node_it.is_null()) {
// There was no recent enough Checkpoint node, and the block iterator
// points at a basic block with a state snapshot. Copy that snapshot and
// start iterating from there.
BasicBlock* block = *block_it;
DCHECK(block->has_state());
checkpoint_state_->checkpoint_frame_state.CopyFrom(*compilation_unit_,
*block->state());
// Start iterating from the first node in the block.
node_it = block->nodes().begin();
} else {
// The node iterator should point at the previous Checkpoint node. We
// don't need that Checkpoint state snapshot anymore, we're making a new
// one, so we can just reuse the snapshot as-is without copying it.
DCHECK_NE(*node_it, checkpoint);
DCHECK((*node_it)->Is<Checkpoint>());
DCHECK((*node_it)->Cast<Checkpoint>()->is_used());
// Advance it by one since we don't need to check this node anymore.
++node_it;
}
// Now walk forward to the checkpoint, and apply any StoreToFrame operations
// along the way into the snapshotted checkpoint state.
BasicBlock* block = *block_it;
while (true) {
// Check if we've run out of nodes in this block, and advance to the
// next block if so.
while (node_it == block->nodes().end()) {
DCHECK_NE(block_it, graph_->end());
// We should only end up visiting blocks with fallthrough to the next
// block -- otherwise, the block should have had a frame state snapshot,
// as either a merge block or a non-fallthrough jump target.
if ((*block_it)->control_node()->Is<Jump>()) {
DCHECK_EQ((*block_it)->control_node()->Cast<Jump>()->target(),
*(block_it + 1));
} else {
DCHECK_IMPLIES((*block_it)
->control_node()
->Cast<ConditionalControlNode>()
->if_true() != *(block_it + 1),
(*block_it)
->control_node()
->Cast<ConditionalControlNode>()
->if_false() != *(block_it + 1));
}
// Advance to the next block (which the above DCHECKs confirm is the
// unconditional fallthrough from the previous block), and update the
// cached block pointer.
block_it++;
block = *block_it;
// We should never visit a block with state (aside from the very first
// block we visit), since then that should have been our start point
// to start with.
DCHECK(!(*block_it)->has_state());
node_it = (*block_it)->nodes().begin();
}
// We should never reach the current node, the "until" checkpoint node
// should be before it.
DCHECK_NE(node_it, node_it_);
Node* node = *node_it;
// Break once we hit the given Checkpoint node. This could be right at
// the start of the iteration, if the BasicBlock held the snapshot and the
// Checkpoint was the first node in it.
if (node == checkpoint) break;
// Update the state from the current node, if it's a state update.
if (node->Is<StoreToFrame>()) {
StoreToFrame* store_to_frame = node->Cast<StoreToFrame>();
checkpoint_state_->checkpoint_frame_state.set(store_to_frame->target(),
store_to_frame->value());
} else {
// Any checkpoints we meet along the way should be unused, otherwise
// they should have provided the most recent state snapshot.
DCHECK_IMPLIES(node->Is<Checkpoint>(),
!node->Cast<Checkpoint>()->is_used());
}
// Continue to the next node.
++node_it;
}
checkpoint_state_->last_checkpoint_block_it = block_it;
checkpoint_state_->last_checkpoint_node_it = node_it;
checkpoint_state_->checkpoint_frame_state.set_accumulator(
checkpoint->accumulator());
ClearDeadCheckpointNodes();
checkpoint->SetUsed();
}
// Walk the checkpointed state, and null out any values that are dead at this
// checkpoint.
// TODO(leszeks): Consider doing this on checkpoint copy, not as a
// post-process step.
void ClearDeadCheckpointNodes() {
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(
checkpoint_state_->latest_checkpoint->bytecode_position());
for (int i = 0; i < register_count(); ++i) {
if (!liveness->RegisterIsLive(i)) {
checkpoint_state_->checkpoint_frame_state.set(interpreter::Register(i),
nullptr);
}
}
// The accumulator is on the checkpoint node itself, and should have already
// been nulled out during graph building if it's dead.
DCHECK_EQ(
!liveness->AccumulatorIsLive(),
checkpoint_state_->checkpoint_frame_state.accumulator() == nullptr);
}
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis;
}
MaglevCompilationUnit* const compilation_unit_;
NodeProcessor node_processor_;
Graph* graph_;
BlockConstIterator block_it_;
NodeConstIterator node_it_;
InterpreterFrameState current_frame_state_;
// The CheckpointState field only exists if the node processor needs
// checkpoint states.
struct CheckpointState {
explicit CheckpointState(const MaglevCompilationUnit& compilation_unit)
: checkpoint_frame_state(compilation_unit) {}
Checkpoint* latest_checkpoint = nullptr;
BlockConstIterator last_checkpoint_block_it;
NodeConstIterator last_checkpoint_node_it;
InterpreterFrameState checkpoint_frame_state;
};
base::Optional<CheckpointState> checkpoint_state_;
};
// A NodeProcessor that wraps multiple NodeProcessors, and forwards to each of
// them iteratively.
template <typename... Processors>
class NodeMultiProcessor;
template <>
class NodeMultiProcessor<> {
public:
static constexpr bool kNeedsCheckpointStates = false;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
void Process(NodeBase* node, const ProcessingState& state) {}
};
template <typename Processor, typename... Processors>
class NodeMultiProcessor<Processor, Processors...>
: NodeMultiProcessor<Processors...> {
using Base = NodeMultiProcessor<Processors...>;
public:
static constexpr bool kNeedsCheckpointStates =
Processor::kNeedsCheckpointStates || Base::kNeedsCheckpointStates;
template <typename Node>
void Process(Node* node, const ProcessingState& state) {
processor_.Process(node, state);
Base::Process(node, state);
}
void PreProcessGraph(MaglevCompilationUnit* unit, Graph* graph) {
processor_.PreProcessGraph(unit, graph);
Base::PreProcessGraph(unit, graph);
}
void PostProcessGraph(MaglevCompilationUnit* unit, Graph* graph) {
// Post-process in reverse order, so that post-processing runs in the
// reverse of the pre-processing order (like unwinding nested scopes).
Base::PostProcessGraph(unit, graph);
processor_.PostProcessGraph(unit, graph);
}
void PreProcessBasicBlock(MaglevCompilationUnit* unit, BasicBlock* block) {
processor_.PreProcessBasicBlock(unit, block);
Base::PreProcessBasicBlock(unit, block);
}
private:
Processor processor_;
};
template <typename... Processors>
using GraphMultiProcessor = GraphProcessor<NodeMultiProcessor<Processors...>>;
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_PROCESSOR_H_
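For reference, a minimal sketch of a pass that plugs into the processor interface above; the class name and counting logic are hypothetical, and the GraphProcessor wrapper defined earlier in this header would drive the walk.
class NodeCountingProcessor {
 public:
  // This pass does not need reconstructed checkpoint frame states.
  static constexpr bool kNeedsCheckpointStates = false;
  void PreProcessGraph(MaglevCompilationUnit*, Graph*) { count_ = 0; }
  void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}
  void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock*) {}
  // A single NodeBase* overload catches every concrete node type, exactly as
  // the empty NodeMultiProcessor<> base does.
  void Process(NodeBase* node, const ProcessingState&) { count_++; }
  int count() const { return count_; }

 private:
  int count_ = 0;
};
// Several such processors can share one graph walk, e.g.
//   GraphMultiProcessor<NodeCountingProcessor, AnotherProcessor>
// where AnotherProcessor is any class with the same interface.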

src/maglev/maglev-graph.h Normal file

@ -0,0 +1,54 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_H_
#define V8_MAGLEV_MAGLEV_GRAPH_H_
#include <vector>
#include "src/maglev/maglev-basic-block.h"
namespace v8 {
namespace internal {
namespace maglev {
using BlockConstIterator = std::vector<BasicBlock*>::const_iterator;
using BlockConstReverseIterator =
std::vector<BasicBlock*>::const_reverse_iterator;
class Graph {
public:
explicit Graph(Zone* zone) : blocks_(zone) {}
BasicBlock* operator[](int i) { return blocks_[i]; }
const BasicBlock* operator[](int i) const { return blocks_[i]; }
int num_blocks() const { return static_cast<int>(blocks_.size()); }
BlockConstIterator begin() const { return blocks_.begin(); }
BlockConstIterator end() const { return blocks_.end(); }
BlockConstReverseIterator rbegin() const { return blocks_.rbegin(); }
BlockConstReverseIterator rend() const { return blocks_.rend(); }
BasicBlock* last_block() const { return blocks_.back(); }
void Add(BasicBlock* block) { blocks_.push_back(block); }
uint32_t stack_slots() const { return stack_slots_; }
void set_stack_slots(uint32_t stack_slots) {
DCHECK_EQ(kMaxUInt32, stack_slots_);
DCHECK_NE(kMaxUInt32, stack_slots);
stack_slots_ = stack_slots;
}
private:
uint32_t stack_slots_ = kMaxUInt32;
ZoneVector<BasicBlock*> blocks_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_H_
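As a rough usage sketch (assuming a Zone and already-built BasicBlocks are available; the function name is hypothetical), a client fills the Graph once and then iterates it in block order:
void BuildAndWalk(Zone* zone, BasicBlock* entry, BasicBlock* exit) {
  Graph graph(zone);
  graph.Add(entry);
  graph.Add(exit);
  // The register allocator later records its frame size exactly once via
  // set_stack_slots(), as the DCHECKs above enforce.
  for (BasicBlock* block : graph) {
    // Visit block->nodes() and block->control_node() here.
    (void)block;
  }
}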

src/maglev/maglev-interpreter-frame-state.h Normal file

@ -0,0 +1,363 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
#define V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
#include "src/maglev/maglev-register-frame-array.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace maglev {
class BasicBlock;
class MergePointInterpreterFrameState;
class InterpreterFrameState {
public:
explicit InterpreterFrameState(const MaglevCompilationUnit& info)
: frame_(info) {}
InterpreterFrameState(const MaglevCompilationUnit& info,
const InterpreterFrameState& state)
: accumulator_(state.accumulator_), frame_(info) {
frame_.CopyFrom(info, state.frame_, nullptr);
}
void CopyFrom(const MaglevCompilationUnit& info,
const InterpreterFrameState& state) {
accumulator_ = state.accumulator_;
frame_.CopyFrom(info, state.frame_, nullptr);
}
inline void CopyFrom(const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
void set_accumulator(ValueNode* value) { accumulator_ = value; }
ValueNode* accumulator() const { return accumulator_; }
void set(interpreter::Register reg, ValueNode* value) {
DCHECK_IMPLIES(reg.is_parameter(),
reg == interpreter::Register::current_context() ||
reg.ToParameterIndex() >= 0);
frame_[reg] = value;
}
ValueNode* get(interpreter::Register reg) const {
DCHECK_IMPLIES(reg.is_parameter(),
reg == interpreter::Register::current_context() ||
reg.ToParameterIndex() >= 0);
return frame_[reg];
}
const RegisterFrameArray<ValueNode*>& frame() const { return frame_; }
private:
ValueNode* accumulator_ = nullptr;
RegisterFrameArray<ValueNode*> frame_;
};
class MergePointInterpreterFrameState {
public:
void CheckIsLoopPhiIfNeeded(const MaglevCompilationUnit& compilation_unit,
int merge_offset, interpreter::Register reg,
ValueNode* value) {
#ifdef DEBUG
if (!compilation_unit.bytecode_analysis.IsLoopHeader(merge_offset)) return;
auto& assignments =
compilation_unit.bytecode_analysis.GetLoopInfoFor(merge_offset)
.assignments();
if (reg.is_parameter()) {
if (!assignments.ContainsParameter(reg.ToParameterIndex())) return;
} else {
DCHECK(compilation_unit.bytecode_analysis.GetInLivenessFor(merge_offset)
->RegisterIsLive(reg.index()));
if (!assignments.ContainsLocal(reg.index())) return;
}
DCHECK(value->Is<Phi>());
#endif
}
MergePointInterpreterFrameState(
const MaglevCompilationUnit& info, const InterpreterFrameState& state,
int merge_offset, int predecessor_count, BasicBlock* predecessor,
const compiler::BytecodeLivenessState* liveness)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
live_registers_and_accumulator_(
info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
liveness_(liveness),
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
int live_index = 0;
ForEachRegister(info, [&](interpreter::Register reg) {
live_registers_and_accumulator_[live_index++] = state.get(reg);
});
if (liveness_->AccumulatorIsLive()) {
live_registers_and_accumulator_[live_index++] = state.accumulator();
}
predecessors_[0] = predecessor;
}
MergePointInterpreterFrameState(
const MaglevCompilationUnit& info, int merge_offset,
int predecessor_count, const compiler::BytecodeLivenessState* liveness,
const compiler::LoopInfo* loop_info)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
live_registers_and_accumulator_(
info.zone()->NewArray<ValueNode*>(SizeFor(info, liveness))),
liveness_(liveness),
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)) {
int live_index = 0;
auto& assignments = loop_info->assignments();
ForEachParameter(info, [&](interpreter::Register reg) {
ValueNode* value = nullptr;
if (assignments.ContainsParameter(reg.ToParameterIndex())) {
value = NewLoopPhi(info.zone(), reg, merge_offset, value);
}
live_registers_and_accumulator_[live_index++] = value;
});
ForEachLocal([&](interpreter::Register reg) {
ValueNode* value = nullptr;
if (assignments.ContainsLocal(reg.index())) {
value = NewLoopPhi(info.zone(), reg, merge_offset, value);
}
live_registers_and_accumulator_[live_index++] = value;
});
DCHECK(!liveness_->AccumulatorIsLive());
#ifdef DEBUG
predecessors_[0] = nullptr;
#endif
}
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void Merge(const MaglevCompilationUnit& compilation_unit,
const InterpreterFrameState& unmerged, BasicBlock* predecessor,
int merge_offset) {
DCHECK_GT(predecessor_count_, 1);
DCHECK_LT(predecessors_so_far_, predecessor_count_);
predecessors_[predecessors_so_far_] = predecessor;
ForEachValue(
compilation_unit, [&](interpreter::Register reg, ValueNode*& value) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
value = MergeValue(compilation_unit.zone(), reg, value,
unmerged.get(reg), merge_offset);
});
predecessors_so_far_++;
DCHECK_LE(predecessors_so_far_, predecessor_count_);
}
RegisterState* register_state() { return register_values_; }
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void MergeLoop(const MaglevCompilationUnit& compilation_unit,
const InterpreterFrameState& loop_end_state,
BasicBlock* loop_end_block, int merge_offset) {
DCHECK_EQ(predecessors_so_far_, predecessor_count_);
DCHECK_NULL(predecessors_[0]);
predecessors_[0] = loop_end_block;
ForEachValue(
compilation_unit, [&](interpreter::Register reg, ValueNode* value) {
CheckIsLoopPhiIfNeeded(compilation_unit, merge_offset, reg, value);
MergeLoopValue(compilation_unit.zone(), reg, value,
loop_end_state.get(reg), merge_offset);
});
DCHECK(!liveness_->AccumulatorIsLive());
}
bool has_phi() const { return !phis_.is_empty(); }
Phi::List* phis() { return &phis_; }
void SetPhis(Phi::List&& phis) {
// Move the collected phis to the live interpreter frame.
DCHECK(phis_.is_empty());
phis_.MoveTail(&phis, phis.begin());
}
int predecessor_count() const { return predecessor_count_; }
BasicBlock* predecessor_at(int i) const {
DCHECK_EQ(predecessors_so_far_, predecessor_count_);
DCHECK_LT(i, predecessor_count_);
return predecessors_[i];
}
private:
friend void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state);
ValueNode* MergeValue(Zone* zone, interpreter::Register owner,
ValueNode* merged, ValueNode* unmerged,
int merge_offset) {
// If the merged node is null, this is a pre-created loop header merge
// frame with null values for anything that isn't a loop Phi.
if (merged == nullptr) {
DCHECK_NULL(predecessors_[0]);
DCHECK_EQ(predecessors_so_far_, 1);
return unmerged;
}
Phi* result = merged->TryCast<Phi>();
if (result != nullptr && result->merge_offset() == merge_offset) {
// It's possible that merged == unmerged at this point since loop-phis are
// not dropped if they are only assigned to themselves in the loop.
DCHECK_EQ(result->owner(), owner);
result->set_input(predecessors_so_far_, unmerged);
return result;
}
if (merged == unmerged) return merged;
// Up to this point all predecessors had the same value for this interpreter
// frame slot. Now that we find a distinct value, insert a copy of the first
// value for each predecessor seen so far, in addition to the new value.
// TODO(verwaest): Unclear whether we want this for Maglev: Instead of
// letting the register allocator remove phis, we could always merge through
// the frame slot. In that case we only need the inputs for representation
// selection, and hence could remove duplicate inputs. We'd likely need to
// attach the interpreter register to the phi in that case?
result = Node::New<Phi>(zone, predecessor_count_, owner, merge_offset);
for (int i = 0; i < predecessors_so_far_; i++) result->set_input(i, merged);
result->set_input(predecessors_so_far_, unmerged);
phis_.Add(result);
return result;
}
void MergeLoopValue(Zone* zone, interpreter::Register owner,
ValueNode* merged, ValueNode* unmerged,
int merge_offset) {
Phi* result = merged->TryCast<Phi>();
if (result == nullptr || result->merge_offset() != merge_offset) {
DCHECK_EQ(merged, unmerged);
return;
}
DCHECK_EQ(result->owner(), owner);
// The loop jump is defined to unconditionally be index 0.
#ifdef DEBUG
DCHECK_NULL(result->input(0).node());
#endif
result->set_input(0, unmerged);
}
ValueNode* NewLoopPhi(Zone* zone, interpreter::Register reg, int merge_offset,
ValueNode* initial_value) {
DCHECK_EQ(predecessors_so_far_, 1);
// Create a new loop phi, which for now is empty.
Phi* result = Node::New<Phi>(zone, predecessor_count_, reg, merge_offset);
#ifdef DEBUG
result->set_input(0, nullptr);
#endif
phis_.Add(result);
return result;
}
static int SizeFor(const MaglevCompilationUnit& info,
const compiler::BytecodeLivenessState* liveness) {
return info.parameter_count() + liveness->live_value_count();
}
template <typename Function>
void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) const {
for (int i = 0; i < info.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
f(reg);
}
}
template <typename Function>
void ForEachParameter(const MaglevCompilationUnit& info, Function&& f) {
for (int i = 0; i < info.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
f(reg);
}
}
template <typename Function>
void ForEachLocal(Function&& f) const {
for (int register_index : *liveness_) {
interpreter::Register reg = interpreter::Register(register_index);
f(reg);
}
}
template <typename Function>
void ForEachLocal(Function&& f) {
for (int register_index : *liveness_) {
interpreter::Register reg = interpreter::Register(register_index);
f(reg);
}
}
template <typename Function>
void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
ForEachParameter(info, f);
ForEachLocal(f);
}
template <typename Function>
void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) const {
ForEachParameter(info, f);
ForEachLocal(f);
}
template <typename Function>
void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
int live_index = 0;
ForEachRegister(info, [&](interpreter::Register reg) {
f(reg, live_registers_and_accumulator_[live_index++]);
});
if (liveness_->AccumulatorIsLive()) {
f(interpreter::Register::virtual_accumulator(),
live_registers_and_accumulator_[live_index++]);
}
DCHECK_EQ(live_index, SizeFor(info, liveness_));
}
int predecessor_count_;
int predecessors_so_far_;
Phi::List phis_;
ValueNode** live_registers_and_accumulator_;
const compiler::BytecodeLivenessState* liveness_ = nullptr;
BasicBlock** predecessors_;
#define N(V) RegisterState{nullptr},
RegisterState register_values_[kAllocatableGeneralRegisterCount] = {
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(N)};
#undef N
};
void InterpreterFrameState::CopyFrom(
const MaglevCompilationUnit& info,
const MergePointInterpreterFrameState& state) {
int live_index = 0;
state.ForEachRegister(info, [&](interpreter::Register reg) {
frame_[reg] = state.live_registers_and_accumulator_[live_index++];
});
if (state.liveness_->AccumulatorIsLive()) {
accumulator_ = state.live_registers_and_accumulator_[live_index++];
}
}
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_INTERPRETER_FRAME_STATE_H_
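The phi-on-first-disagreement policy in MergeValue above can be summarised with a standalone, simplified model (plain C++ with hypothetical types; loop-phi pre-creation, liveness, and register ownership are elided):
#include <deque>
#include <vector>

struct Value {
  bool is_phi = false;
  std::vector<Value*> inputs;  // One slot per predecessor.
};

Value* MergeValueSketch(Value* merged, Value* unmerged, int predecessors_so_far,
                        int predecessor_count, std::deque<Value>& zone) {
  if (merged->is_phi) {
    // An existing phi for this merge point: just record the new input.
    merged->inputs[predecessors_so_far] = unmerged;
    return merged;
  }
  if (merged == unmerged) return merged;  // All predecessors still agree.
  // First disagreement: materialise a phi, replicating the previously agreed
  // value for every predecessor seen so far, then record the new value.
  zone.emplace_back();
  Value* phi = &zone.back();
  phi->is_phi = true;
  phi->inputs.resize(predecessor_count);  // Defaults remaining slots to null.
  for (int i = 0; i < predecessors_so_far; i++) phi->inputs[i] = merged;
  phi->inputs[predecessors_so_far] = unmerged;
  return phi;
}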

src/maglev/maglev-ir.cc Normal file

@ -0,0 +1,848 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-ir.h"
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/x64/register-x64.h"
#include "src/compiler/backend/instruction.h"
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-vreg-allocator.h"
namespace v8 {
namespace internal {
namespace maglev {
const char* ToString(Opcode opcode) {
#define DEF_NAME(Name) #Name,
static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
#undef DEF_NAME
return names[static_cast<int>(opcode)];
}
#define __ code_gen_state->masm()->
namespace {
// ---
// Vreg allocation helpers.
// ---
int GetVirtualRegister(Node* node) {
return compiler::UnallocatedOperand::cast(node->result().operand())
.virtual_register();
}
void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
node->result().SetUnallocated(
compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
vreg_state->AllocateVirtualRegister());
}
void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
Register reg) {
node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
reg.code(),
vreg_state->AllocateVirtualRegister());
}
void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) {
node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
}
void UseRegister(Input& input) {
input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
compiler::UnallocatedOperand::USED_AT_START,
GetVirtualRegister(input.node()));
}
void UseAny(Input& input) {
input.SetUnallocated(
compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
compiler::UnallocatedOperand::USED_AT_START,
GetVirtualRegister(input.node()));
}
void UseFixed(Input& input, Register reg) {
input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
GetVirtualRegister(input.node()));
}
// ---
// Code gen helpers.
// ---
void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
// TODO(leszeks): Consider special casing the value. (Toon: could possibly
// be done through Input directly?)
const compiler::AllocatedOperand& operand =
compiler::AllocatedOperand::cast(input.operand());
if (operand.IsRegister()) {
__ Push(operand.GetRegister());
} else {
DCHECK(operand.IsStackSlot());
__ Push(GetStackSlot(operand));
}
}
// ---
// Deferred code handling.
// ---
// Base case provides an error.
template <typename T, typename Enable = void>
struct CopyForDeferredHelper {
template <typename U>
struct No_Copy_Helper_Implemented_For_Type;
static void Copy(MaglevCompilationUnit* compilation_unit,
No_Copy_Helper_Implemented_For_Type<T>);
};
// Helper for copies by value.
template <typename T, typename Enable = void>
struct CopyForDeferredByValue {
static T Copy(MaglevCompilationUnit* compilation_unit, T node) {
return node;
}
};
// Node pointers are copied by value.
template <typename T>
struct CopyForDeferredHelper<
T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
: public CopyForDeferredByValue<T*> {};
// Arithmetic values and enums are copied by value.
template <typename T>
struct CopyForDeferredHelper<
T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
: public CopyForDeferredByValue<T> {};
template <typename T>
struct CopyForDeferredHelper<
T, typename std::enable_if<std::is_enum<T>::value>::type>
: public CopyForDeferredByValue<T> {};
// MaglevCompilationUnits are copied by value.
template <>
struct CopyForDeferredHelper<MaglevCompilationUnit*>
: public CopyForDeferredByValue<MaglevCompilationUnit*> {};
// Machine registers are copied by value.
template <>
struct CopyForDeferredHelper<Register>
: public CopyForDeferredByValue<Register> {};
// InterpreterFrameState is cloned.
template <>
struct CopyForDeferredHelper<const InterpreterFrameState*> {
static const InterpreterFrameState* Copy(
MaglevCompilationUnit* compilation_unit,
const InterpreterFrameState* frame_state) {
return compilation_unit->zone()->New<InterpreterFrameState>(
*compilation_unit, *frame_state);
}
};
template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
return CopyForDeferredHelper<T>::Copy(compilation_unit,
std::forward<T>(value));
}
template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T& value) {
return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
}
template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, const T& value) {
return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
}
template <typename Function, typename FunctionPointer = Function>
struct FunctionArgumentsTupleHelper
: FunctionArgumentsTupleHelper<Function,
decltype(&FunctionPointer::operator())> {};
template <typename T, typename C, typename R, typename... A>
struct FunctionArgumentsTupleHelper<T, R (C::*)(A...) const> {
using FunctionPointer = R (*)(A...);
using Tuple = std::tuple<A...>;
static constexpr size_t kSize = sizeof...(A);
};
template <typename T>
struct StripFirstTwoTupleArgs;
template <typename T1, typename T2, typename... T>
struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
using Stripped = std::tuple<T...>;
};
template <typename Function>
class DeferredCodeInfoImpl final : public MaglevCodeGenState::DeferredCodeInfo {
public:
using FunctionPointer =
typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
using Tuple = typename StripFirstTwoTupleArgs<
typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;
static constexpr size_t kSize = FunctionArgumentsTupleHelper<Function>::kSize;
template <typename... InArgs>
explicit DeferredCodeInfoImpl(MaglevCompilationUnit* compilation_unit,
FunctionPointer function, InArgs&&... args)
: function(function),
args(CopyForDeferred(compilation_unit, std::forward<InArgs>(args))...) {
}
DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;
void Generate(MaglevCodeGenState* code_gen_state,
Label* return_label) override {
DoCall(code_gen_state, return_label, std::make_index_sequence<kSize - 2>{});
}
private:
template <size_t... I>
auto DoCall(MaglevCodeGenState* code_gen_state, Label* return_label,
std::index_sequence<I...>) {
// TODO(leszeks): This could be replaced with std::apply in C++17.
return function(code_gen_state, return_label, std::get<I>(args)...);
}
FunctionPointer function;
Tuple args;
};
template <typename Function, typename... Args>
void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
Function&& deferred_code_gen, Args&&... args) {
using DeferredCodeInfoT = DeferredCodeInfoImpl<Function>;
DeferredCodeInfoT* deferred_code =
code_gen_state->compilation_unit()->zone()->New<DeferredCodeInfoT>(
code_gen_state->compilation_unit(), deferred_code_gen,
std::forward<Args>(args)...);
code_gen_state->PushDeferredCode(deferred_code);
if (FLAG_code_comments) {
__ RecordComment("-- Jump to deferred code");
}
__ j(cond, &deferred_code->deferred_code_label);
__ bind(&deferred_code->return_label);
}
// ---
// Deopt
// ---
void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
int deopt_bytecode_position,
const InterpreterFrameState* checkpoint_state) {
DCHECK(node->properties().can_deopt());
// TODO(leszeks): Extract to separate call, or at the very least defer.
// TODO(leszeks): Stack check.
MaglevCompilationUnit* compilation_unit = code_gen_state->compilation_unit();
int maglev_frame_size = code_gen_state->vreg_slots();
ASM_CODE_COMMENT_STRING(code_gen_state->masm(), "Deoptimize");
__ RecordComment("Push registers and load accumulator");
int num_saved_slots = 0;
// TODO(verwaest): We probably shouldn't be spilling all values that go
// through deopt :)
for (int i = 0; i < compilation_unit->register_count(); ++i) {
ValueNode* node = checkpoint_state->get(interpreter::Register(i));
if (node == nullptr) continue;
__ Push(GetStackSlot(node->spill_slot()));
num_saved_slots++;
}
if (checkpoint_state->accumulator()) {
__ movq(kInterpreterAccumulatorRegister,
GetStackSlot(checkpoint_state->accumulator()->spill_slot()));
}
__ RecordComment("Load registers from extra pushed slots");
int slot = 0;
for (int i = 0; i < compilation_unit->register_count(); ++i) {
ValueNode* node = checkpoint_state->get(interpreter::Register(i));
if (node == nullptr) continue;
__ movq(kScratchRegister, MemOperand(rsp, (num_saved_slots - slot++ - 1) *
kSystemPointerSize));
__ movq(MemOperand(rbp, InterpreterFrameConstants::kRegisterFileFromFp -
i * kSystemPointerSize),
kScratchRegister);
}
DCHECK_EQ(slot, num_saved_slots);
__ RecordComment("Materialize bytecode array and offset");
__ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp),
compilation_unit->bytecode.object());
__ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
Smi::FromInt(deopt_bytecode_position +
(BytecodeArray::kHeaderSize - kHeapObjectTag)));
// Reset rsp to bytecode sized frame.
__ addq(rsp, Immediate((maglev_frame_size + num_saved_slots -
(2 + compilation_unit->register_count())) *
kSystemPointerSize));
__ TailCallBuiltin(Builtin::kBaselineOrInterpreterEnterAtBytecode);
}
void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
const ProcessingState& state) {
EmitDeopt(code_gen_state, node, state.checkpoint()->bytecode_position(),
state.checkpoint_frame_state());
}
// ---
// Print
// ---
void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {
if (!node->has_inputs()) return;
os << " [";
for (int i = 0; i < node->input_count(); i++) {
if (i != 0) os << ", ";
graph_labeller->PrintInput(os, node->input(i));
}
os << "]";
}
void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {}
void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const ValueNode* node) {
os << "" << node->result().operand();
if (node->has_valid_live_range()) {
os << ", live range: [" << node->live_range().start << "-"
<< node->live_range().end << "]";
}
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const UnconditionalControlNode* node) {
os << " b" << graph_labeller->BlockId(node->target());
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const ConditionalControlNode* node) {
os << " b" << graph_labeller->BlockId(node->if_true()) << " b"
<< graph_labeller->BlockId(node->if_false());
}
template <typename NodeT>
void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeT* node) {
os << node->opcode();
node->PrintParams(os, graph_labeller);
PrintInputs(os, graph_labeller, node);
PrintResult(os, graph_labeller, node);
PrintTargets(os, graph_labeller, node);
}
} // namespace
void NodeBase::Print(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return PrintImpl(os, graph_labeller, this->Cast<Name>());
NODE_BASE_LIST(V)
#undef V
}
UNREACHABLE();
}
// ---
// Nodes
// ---
void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
}
void SmiConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
__ Move(ToRegister(result()), Immediate(value()));
}
void SmiConstant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << value() << ")";
}
void Checkpoint::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void Checkpoint::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {}
void Checkpoint::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << accumulator() << ")";
}
void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void SoftDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
EmitDeopt(code_gen_state, this, state);
}
void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
}
void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
UNREACHABLE();
}
void Constant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << object_ << ")";
}
void InitialValue::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
// TODO(leszeks): Make this nicer.
result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
(StandardFrameConstants::kExpressionsOffset -
UnoptimizedFrameConstants::kRegisterFileFromFp) /
kSystemPointerSize +
source().index(),
vreg_state->AllocateVirtualRegister());
}
void InitialValue::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
// No-op, the value is already in the appropriate slot.
}
void InitialValue::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << source().ToString() << ")";
}
void LoadGlobal::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseFixed(context(), kContextRegister);
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void LoadGlobal::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
// TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
DCHECK_EQ(ToRegister(context()), kContextRegister);
// TODO(jgruber): Detect properly.
const int ic_kind =
static_cast<int>(FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
__ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
LoadGlobalNoFeedbackDescriptor::kName),
name().object());
__ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
LoadGlobalNoFeedbackDescriptor::kICKind),
Immediate(Smi::FromInt(ic_kind)));
// TODO(jgruber): Implement full LoadGlobal handling.
__ CallBuiltin(Builtin::kLoadGlobalIC_NoFeedback);
}
void LoadGlobal::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << name() << ")";
}
void RegisterInput::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsFixed(vreg_state, this, input());
}
void RegisterInput::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
// Nothing to be done, the value is already in the register.
}
void RegisterInput::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << input() << ")";
}
void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
}
void RootConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
if (!has_valid_live_range()) return;
Register reg = ToRegister(result());
__ LoadRoot(reg, index());
}
void RootConstant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << RootsTable::name(index()) << ")";
}
void CheckMaps::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(actual_map_input());
set_temporaries_needed(1);
}
void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register object = ToRegister(actual_map_input());
RegList temps = temporaries();
Register map_tmp =
Register::from_code(base::bits::CountTrailingZerosNonZero(temps));
__ LoadMap(map_tmp, object);
__ Cmp(map_tmp, map().object());
// TODO(leszeks): Encode as a bit on CheckMaps.
if (map().object()->is_migration_target()) {
JumpToDeferredIf(
not_equal, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label,
Register object, CheckMaps* node, int checkpoint_position,
const InterpreterFrameState* checkpoint_state_snapshot,
Register map_tmp) {
Label deopt;
// If the map is not deprecated, deopt straight away.
__ movl(kScratchRegister,
FieldOperand(map_tmp, Map::kBitField3Offset));
__ testl(kScratchRegister,
Immediate(Map::Bits3::IsDeprecatedBit::kMask));
__ j(zero, &deopt);
// Otherwise, try migrating the object. If the migration returns Smi
// zero, then it failed and we should deopt.
__ Push(object);
__ Move(kContextRegister,
code_gen_state->broker()->target_native_context().object());
__ CallRuntime(Runtime::kTryMigrateInstance);
__ cmpl(kReturnRegister0, Immediate(0));
__ j(equal, &deopt);
// The migrated object is returned on success, retry the map check.
__ LoadMap(map_tmp, kReturnRegister0);
__ Cmp(map_tmp, node->map().object());
__ j(equal, return_label);
__ bind(&deopt);
EmitDeopt(code_gen_state, node, checkpoint_position,
checkpoint_state_snapshot);
},
object, this, state.checkpoint()->bytecode_position(),
state.checkpoint_frame_state(), map_tmp);
} else {
Label is_ok;
__ j(equal, &is_ok);
EmitDeopt(code_gen_state, this, state);
__ bind(&is_ok);
}
}
void CheckMaps::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << map() << ")";
}
void LoadField::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(object_input());
DefineAsRegister(vreg_state, this);
}
void LoadField::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
// os << "kField, is in object = "
// << LoadHandler::IsInobjectBits::decode(raw_handler)
// << ", is double = " << LoadHandler::IsDoubleBits::decode(raw_handler)
// << ", field index = " <<
// LoadHandler::FieldIndexBits::decode(raw_handler);
Register object = ToRegister(object_input());
int handler = this->handler();
if (LoadHandler::IsInobjectBits::decode(handler)) {
Operand input_field_operand = FieldOperand(
object, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
__ DecompressAnyTagged(ToRegister(result()), input_field_operand);
if (LoadHandler::IsDoubleBits::decode(handler)) {
// TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
UNREACHABLE();
}
} else {
// TODO(leszeks): Handle out-of-object properties.
UNREACHABLE();
}
}
void LoadField::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << std::hex << handler() << std::dec << ")";
}
void LoadNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(object_input());
DefineAsRegister(vreg_state, this);
}
void LoadNamedGeneric::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
UNREACHABLE();
}
void LoadNamedGeneric::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << name_ << ")";
}
void Increment::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void Increment::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
using D = UnaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
// TODO(leszeks): Implement full handling.
__ CallBuiltin(Builtin::kIncrement_WithFeedback);
}
void StoreToFrame::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void StoreToFrame::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {}
void StoreToFrame::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << target().ToString() << ""
<< PrintNodeLabel(graph_labeller, value()) << ")";
}
void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UNREACHABLE();
}
void GapMove::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
if (source().IsAnyRegister()) {
Register source_reg = ToRegister(source());
if (target().IsAnyRegister()) {
__ movq(ToRegister(target()), source_reg);
} else {
__ movq(ToMemOperand(target()), source_reg);
}
} else {
MemOperand source_op = ToMemOperand(source());
if (target().IsAnyRegister()) {
__ movq(ToRegister(target()), source_op);
} else {
__ movq(kScratchRegister, source_op);
__ movq(ToMemOperand(target()), kScratchRegister);
}
}
}
void GapMove::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << source() << "" << target() << ")";
}
void Add::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(left_input());
UseRegister(right_input());
DefineSameAsFirst(vreg_state, this);
}
void Add::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
UNREACHABLE();
}
void LessThan::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
using D = BinaryOp_WithFeedbackDescriptor;
UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
UseFixed(right_input(), D::GetRegisterParameter(D::kRight));
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void LessThan::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
using D = BinaryOp_WithFeedbackDescriptor;
DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
DCHECK_EQ(ToRegister(right_input()), D::GetRegisterParameter(D::kRight));
__ Move(kContextRegister, code_gen_state->native_context().object());
__ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
__ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
// TODO(jgruber): Implement full handling.
__ CallBuiltin(Builtin::kLessThan_WithFeedback);
}
void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
// Phi inputs are processed in the post-process, once loop phis' inputs'
// v-regs are allocated.
result().SetUnallocated(
compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
vreg_state->AllocateVirtualRegister());
}
// TODO(verwaest): Remove after switching the register allocator.
void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
for (Input& input : *this) {
UseAny(input);
}
}
void Phi::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(state.interpreter_frame_state()->get(owner()), this);
}
void Phi::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << owner().ToString() << ")";
}
void CallProperty::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseFixed(function(), CallTrampolineDescriptor::GetRegisterParameter(
CallTrampolineDescriptor::kFunction));
UseFixed(context(), kContextRegister);
for (int i = 0; i < num_args(); i++) {
UseAny(arg(i));
}
DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void CallProperty::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
// TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.
DCHECK_EQ(ToRegister(function()),
CallTrampolineDescriptor::GetRegisterParameter(
CallTrampolineDescriptor::kFunction));
DCHECK_EQ(ToRegister(context()), kContextRegister);
for (int i = num_args() - 1; i >= 0; --i) {
PushInput(code_gen_state, arg(i));
}
uint32_t arg_count = num_args();
__ Move(CallTrampolineDescriptor::GetRegisterParameter(
CallTrampolineDescriptor::kActualArgumentsCount),
Immediate(arg_count));
// TODO(leszeks): This doesn't collect feedback yet, either pass in the
// feedback vector by Handle.
__ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
}
void CallUndefinedReceiver::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UNREACHABLE();
}
void CallUndefinedReceiver::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
UNREACHABLE();
}
// ---
// Control nodes
// ---
void Return::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseFixed(value_input(), kReturnRegister0);
}
void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
__ LeaveFrame(StackFrame::BASELINE);
__ Ret(code_gen_state->parameter_count() * kSystemPointerSize,
kScratchRegister);
}
void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
// Avoid emitting a jump to the next block.
if (target() != state.next_block()) {
__ jmp(target()->label());
}
}
void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void JumpLoop::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
__ jmp(target()->label());
}
void BranchIfTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseRegister(condition_input());
}
void BranchIfTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
Register value = ToRegister(condition_input());
auto* next_block = state.next_block();
// We don't have any branch probability information, so try to jump
// over whatever the next block emitted is.
if (if_false() == next_block) {
// Jump over the false block if true, otherwise fall through into it.
__ JumpIfRoot(value, RootIndex::kTrueValue, if_true()->label());
} else {
// Jump to the false block if true.
__ JumpIfNotRoot(value, RootIndex::kTrueValue, if_false()->label());
// Jump to the true block if it's not the next block.
if (if_true() != next_block) {
__ jmp(if_true()->label());
}
}
}
void BranchIfToBooleanTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
UseFixed(condition_input(),
ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));
}
void BranchIfToBooleanTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
DCHECK_EQ(ToRegister(condition_input()),
ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));
// ToBooleanForBaselineJump returns the ToBoolean value into return reg 1, and
// the original value into kInterpreterAccumulatorRegister, so we don't have
// to worry about it getting clobbered.
__ CallBuiltin(Builtin::kToBooleanForBaselineJump);
__ SmiCompare(kReturnRegister1, Smi::zero());
auto* next_block = state.next_block();
// We don't have any branch probability information, so try to jump
// over whatever the next block emitted is.
if (if_false() == next_block) {
// Jump over the false block if non zero, otherwise fall through into it.
__ j(not_equal, if_true()->label());
} else {
// Jump to the false block if zero.
__ j(equal, if_false()->label());
// Fall through or jump to the true block.
if (if_true() != next_block) {
__ jmp(if_true()->label());
}
}
}
} // namespace maglev
} // namespace internal
} // namespace v8
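The deferred-code plumbing above (FunctionArgumentsTupleHelper, DeferredCodeInfoImpl and JumpToDeferredIf) boils down to deducing a capture-less lambda's parameter list from its operator(), copying the trailing arguments into a tuple, and replaying the call later via std::index_sequence. A standalone C++14 sketch of just that trick, with hypothetical names:
#include <cstddef>
#include <cstdio>
#include <tuple>
#include <utility>

template <typename Function>
struct ArgsTupleHelper : ArgsTupleHelper<decltype(&Function::operator())> {};
template <typename C, typename R, typename... A>
struct ArgsTupleHelper<R (C::*)(A...) const> {
  using FunctionPointer = R (*)(A...);
  using Tuple = std::tuple<A...>;
};

template <typename Function>
class DeferredCall {
 public:
  using Pointer = typename ArgsTupleHelper<Function>::FunctionPointer;
  using Tuple = typename ArgsTupleHelper<Function>::Tuple;
  template <typename... InArgs>
  DeferredCall(Pointer fn, InArgs&&... args)
      : fn_(fn), args_(std::forward<InArgs>(args)...) {}
  void Run() {
    Call(std::make_index_sequence<std::tuple_size<Tuple>::value>{});
  }

 private:
  template <std::size_t... I>
  void Call(std::index_sequence<I...>) {
    fn_(std::get<I>(args_)...);  // Replay the call with the stored arguments.
  }
  Pointer fn_;
  Tuple args_;
};

int main() {
  // Must be capture-less so it converts to a plain function pointer, just as
  // JumpToDeferredIf requires of its deferred_code_gen lambdas.
  auto print_sum = [](int a, int b) { std::printf("%d\n", a + b); };
  DeferredCall<decltype(print_sum)> deferred(print_sum, 3, 4);
  deferred.Run();  // Prints 7, long after the arguments were captured.
  return 0;
}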

src/maglev/maglev-ir.h Normal file

File diff suppressed because it is too large.

src/maglev/maglev-regalloc-data.h Normal file

@ -0,0 +1,103 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
#define V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
#include "src/codegen/x64/register-x64.h"
#include "src/compiler/backend/instruction.h"
#include "src/utils/pointer-with-payload.h"
namespace v8 {
namespace internal {
namespace maglev {
struct LiveNodeInfo;
#define COUNT(V) +1
static constexpr int kAllocatableGeneralRegisterCount =
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(COUNT);
#undef COUNT
constexpr uint8_t MapRegisterToIndex(Register r) {
uint8_t count = 0;
#define EMIT_BRANCH(V) \
if (r == V) return count; \
count++;
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(EMIT_BRANCH)
#undef EMIT_BRANCH
UNREACHABLE();
}
constexpr Register MapIndexToRegister(int i) {
uint8_t count = 0;
#define EMIT_BRANCH(V) \
if (i == count) return V; \
count++;
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(EMIT_BRANCH)
#undef EMIT_BRANCH
UNREACHABLE();
}
struct RegisterStateFlags {
// TODO(v8:7700): Use the good old Flags mechanism.
static constexpr int kIsMergeShift = 0;
static constexpr int kIsInitializedShift = 1;
const bool is_initialized = false;
const bool is_merge = false;
explicit constexpr operator uintptr_t() const {
return (is_initialized ? 1 << kIsInitializedShift : 0) |
(is_merge ? 1 << kIsMergeShift : 0);
}
constexpr explicit RegisterStateFlags(uintptr_t state)
: is_initialized((state & (1 << kIsInitializedShift)) != 0),
is_merge((state & (1 << kIsMergeShift)) != 0) {}
constexpr RegisterStateFlags(bool is_initialized, bool is_merge)
: is_initialized(is_initialized), is_merge(is_merge) {}
};
constexpr bool operator==(const RegisterStateFlags& left,
const RegisterStateFlags& right) {
return left.is_initialized == right.is_initialized &&
left.is_merge == right.is_merge;
}
typedef PointerWithPayload<void, RegisterStateFlags, 2> RegisterState;
struct RegisterMerge {
compiler::AllocatedOperand* operands() {
return reinterpret_cast<compiler::AllocatedOperand*>(this + 1);
}
compiler::AllocatedOperand& operand(size_t i) { return operands()[i]; }
LiveNodeInfo* node;
};
inline bool LoadMergeState(RegisterState state, RegisterMerge** merge) {
DCHECK(state.GetPayload().is_initialized);
if (state.GetPayload().is_merge) {
*merge = static_cast<RegisterMerge*>(state.GetPointer());
return true;
}
*merge = nullptr;
return false;
}
inline bool LoadMergeState(RegisterState state, LiveNodeInfo** node,
RegisterMerge** merge) {
DCHECK(state.GetPayload().is_initialized);
if (LoadMergeState(state, merge)) {
*node = (*merge)->node;
return true;
}
*node = static_cast<LiveNodeInfo*>(state.GetPointer());
return false;
}
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
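RegisterState above packs the two RegisterStateFlags bits into the low bits of an aligned pointer via PointerWithPayload. A standalone sketch of that packing (plain C++; the struct name is hypothetical, and the real implementation lives in src/utils/pointer-with-payload.h):
#include <cassert>
#include <cstdint>

struct PackedRegisterState {
  // Matches kIsMergeShift == 0 and kIsInitializedShift == 1 above.
  static constexpr uintptr_t kIsMergeBit = uintptr_t{1} << 0;
  static constexpr uintptr_t kIsInitializedBit = uintptr_t{1} << 1;
  static constexpr uintptr_t kPayloadMask = kIsMergeBit | kIsInitializedBit;

  void Set(void* pointer, bool is_initialized, bool is_merge) {
    uintptr_t p = reinterpret_cast<uintptr_t>(pointer);
    assert((p & kPayloadMask) == 0);  // Requires >= 4-byte aligned pointers.
    bits_ = p | (is_initialized ? kIsInitializedBit : 0) |
            (is_merge ? kIsMergeBit : 0);
  }
  void* GetPointer() const {
    return reinterpret_cast<void*>(bits_ & ~kPayloadMask);
  }
  bool is_initialized() const { return (bits_ & kIsInitializedBit) != 0; }
  bool is_merge() const { return (bits_ & kIsMergeBit) != 0; }

 private:
  uintptr_t bits_ = 0;
};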

src/maglev/maglev-regalloc.cc Normal file

@ -0,0 +1,914 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-regalloc.h"
#include "src/base/logging.h"
#include "src/compiler/backend/instruction.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
namespace {
constexpr RegisterStateFlags initialized_node{true, false};
constexpr RegisterStateFlags initialized_merge{true, true};
using BlockReverseIterator = std::vector<BasicBlock>::reverse_iterator;
// A target is a fallthrough of a control node if its ID is the next ID
// after the control node.
//
// TODO(leszeks): Consider using the block iterator instead.
bool IsTargetOfNodeFallthrough(ControlNode* node, BasicBlock* target) {
return node->id() + 1 == target->first_id();
}
ControlNode* NearestPostDominatingHole(ControlNode* node) {
// Conditional control nodes don't cause holes themselves. So, the nearest
// post-dominating hole is the conditional control node's next post-dominating
// hole.
if (node->Is<ConditionalControlNode>()) {
return node->next_post_dominating_hole();
}
// If the node is a Jump, it may be a hole, but only if it is not a
// fallthrough (jump to the immediately next block). Otherwise, it will point
// to the nearest post-dominating hole in its own "next" field.
if (Jump* jump = node->TryCast<Jump>()) {
if (IsTargetOfNodeFallthrough(jump, jump->target())) {
return jump->next_post_dominating_hole();
}
}
return node;
}
bool IsLiveAtTarget(LiveNodeInfo* info, ControlNode* source,
BasicBlock* target) {
if (info == nullptr) return false;
// If we're looping, a value can only be live if it was live before the loop.
if (target->control_node()->id() <= source->id()) {
// Gap moves may already be inserted in the target, so skip over those.
return info->node->id() < target->FirstNonGapMoveId();
}
// TODO(verwaest): This should be true but isn't because we don't yet
// eliminate dead code.
// DCHECK_GT(info->next_use, source->id());
// TODO(verwaest): Since we don't support deopt yet we can only deal with
// direct branches. Add support for holes.
return info->last_use >= target->first_id();
}
} // namespace
StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
MaglevCompilationUnit* compilation_unit, Graph* graph)
: compilation_unit_(compilation_unit) {
ComputePostDominatingHoles(graph);
AllocateRegisters(graph);
graph->set_stack_slots(top_of_stack_);
}
StraightForwardRegisterAllocator::~StraightForwardRegisterAllocator() = default;
// Compute, for all forward control nodes (i.e. excluding Return and JumpLoop) a
// tree of post-dominating control flow holes.
//
// Control flow which interrupts linear control flow fallthrough for basic
// blocks is considered to introduce a control flow "hole".
//
// A──────┐ │
// │ Jump │ │
// └──┬───┘ │
// { │ B──────┐ │
// Control flow { │ │ Jump │ │ Linear control flow
// hole after A { │ └─┬────┘ │
// { ▼ ▼ Fallthrough │
// C──────┐ │
// │Return│ │
// └──────┘ ▼
//
// It is interesting, for each such hole, to know what the next hole will be
// that we will unconditionally reach on our way to an exit node. Such
// subsequent holes are in "post-dominators" of the current block.
//
// As an example, consider the following CFG, with the annotated holes. The
// post-dominating hole tree is the transitive closure of the post-dominator
// tree, up to nodes which are holes (in this example, A, D, F and H).
//
// CFG Immediate Post-dominating
// post-dominators holes
// A──────┐
// │ Jump │ A A
// └──┬───┘ │ │
// { │ B──────┐ │ │
// Control flow { │ │ Jump │ │ B │ B
// hole after A { │ └─┬────┘ │ │ │ │
// { ▼ ▼ │ │ │ │
// C──────┐ │ │ │ │
// │Branch│ └►C◄┘ │ C │
// └┬────┬┘ │ │ │ │
// ▼ │ │ │ │ │
// D──────┐│ │ │ │ │
// │ Jump ││ D │ │ D │ │
// └──┬───┘▼ │ │ │ │ │ │
// { │ E──────┐ │ │ │ │ │ │
// Control flow { │ │ Jump │ │ │ E │ │ │ E │
// hole after D { │ └─┬────┘ │ │ │ │ │ │ │ │
// { ▼ ▼ │ │ │ │ │ │ │ │
// F──────┐ │ ▼ │ │ │ ▼ │ │
// │ Jump │ └►F◄┘ └─┴►F◄┴─┘
// └─────┬┘ │ │
// { │ G──────┐ │ │
// Control flow { │ │ Jump │ │ G │ G
// hole after F { │ └─┬────┘ │ │ │ │
// { ▼ ▼ │ │ │ │
// H──────┐ ▼ │ ▼ │
// │Return│ H◄┘ H◄┘
// └──────┘
//
// Since we only care about forward control, loop jumps are treated the same as
// returns -- they terminate the post-dominating hole chain.
//
void StraightForwardRegisterAllocator::ComputePostDominatingHoles(
Graph* graph) {
// For all blocks, find the list of jumps that jump over code unreachable from
// the block. Such a list of jumps terminates in return or jumploop.
for (BasicBlock* block : base::Reversed(*graph)) {
ControlNode* control = block->control_node();
if (auto node = control->TryCast<Jump>()) {
// If the current control node is a jump, prepend it to the list of jumps
// at the target.
control->set_next_post_dominating_hole(
NearestPostDominatingHole(node->target()->control_node()));
} else if (auto node = control->TryCast<ConditionalControlNode>()) {
ControlNode* first =
NearestPostDominatingHole(node->if_true()->control_node());
ControlNode* second =
NearestPostDominatingHole(node->if_false()->control_node());
// Either find the merge-point of both branches, or the highest reachable
// control-node of the longest branch after the last node of the shortest
// branch.
// As long as there's no merge-point.
while (first != second) {
// Walk the highest branch to find where it goes.
if (first->id() > second->id()) std::swap(first, second);
// If the first branch returns or jumps back, we've found highest
// reachable control-node of the longest branch (the second control
// node).
if (first->Is<Return>() || first->Is<JumpLoop>()) {
control->set_next_post_dominating_hole(second);
break;
}
// Continue one step along the highest branch. This may cross over the
// lowest branch in case it returns or loops. If labelled blocks are
// involved, such swapping of which branch is the highest can occur
// multiple times until a return/jumploop/merge is discovered.
first = first->next_post_dominating_hole();
}
// Once the branches merged, we've found the gap-chain that's relevant for
// the control node.
control->set_next_post_dominating_hole(first);
}
}
}
void StraightForwardRegisterAllocator::PrintLiveRegs() const {
bool first = true;
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* info = register_values_[i];
if (info == nullptr) continue;
if (first) {
first = false;
} else {
printing_visitor_->os() << ", ";
}
printing_visitor_->os()
<< MapIndexToRegister(i) << "=v" << info->node->id();
}
}
void StraightForwardRegisterAllocator::AllocateRegisters(Graph* graph) {
if (FLAG_trace_maglev_regalloc) {
printing_visitor_.reset(new MaglevPrintingVisitor(std::cout));
printing_visitor_->PreProcessGraph(compilation_unit_, graph);
}
for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
BasicBlock* block = *block_it_;
// Restore mergepoint state.
if (block->has_state()) {
InitializeRegisterValues(block->state()->register_state());
}
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->PreProcessBasicBlock(compilation_unit_, block);
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
ControlNode* control = NearestPostDominatingHole(block->control_node());
if (!control->Is<JumpLoop>()) {
printing_visitor_->os() << "\n[holes:";
while (true) {
if (control->Is<Jump>()) {
BasicBlock* target = control->Cast<Jump>()->target();
printing_visitor_->os()
<< " " << control->id() << "-" << target->first_id();
control = control->next_post_dominating_hole();
DCHECK_NOT_NULL(control);
continue;
} else if (control->Is<Return>()) {
printing_visitor_->os() << " " << control->id() << ".";
break;
} else if (control->Is<JumpLoop>()) {
printing_visitor_->os() << " " << control->id() << "";
break;
}
UNREACHABLE();
}
printing_visitor_->os() << "]";
}
printing_visitor_->os() << std::endl;
}
// Activate phis.
if (block->has_phi()) {
// Firstly, make the phi live, and try to assign it to an input
// location.
for (Phi* phi : *block->phis()) {
phi->SetNoSpillOrHint();
LiveNodeInfo* info = MakeLive(phi);
TryAllocateToInput(info, phi);
}
// Secondly try to assign the phi to a free register.
for (Phi* phi : *block->phis()) {
if (phi->result().operand().IsAllocated()) continue;
compiler::InstructionOperand allocation =
TryAllocateRegister(&values_[phi]);
if (allocation.IsAllocated()) {
phi->result().SetAllocated(
compiler::AllocatedOperand::cast(allocation));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr,
nullptr, nullptr));
printing_visitor_->os()
<< "phi (new reg) " << phi->result().operand() << std::endl;
}
}
}
// Finally just use a stack slot.
for (Phi* phi : *block->phis()) {
if (phi->result().operand().IsAllocated()) continue;
LiveNodeInfo& info = values_[phi];
AllocateSpillSlot(&info);
// TODO(verwaest): Will this be used at all?
phi->result().SetAllocated(info.stack_slot->slot);
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr,
nullptr, nullptr));
printing_visitor_->os()
<< "phi (stack) " << phi->result().operand() << std::endl;
}
}
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
printing_visitor_->os() << std::endl;
}
}
node_it_ = block->nodes().begin();
for (; node_it_ != block->nodes().end(); ++node_it_) {
AllocateNode(*node_it_);
}
AllocateControlNode(block->control_node(), block);
}
}
void StraightForwardRegisterAllocator::UpdateInputUseAndClearDead(
uint32_t use, const Input& input) {
ValueNode* node = input.node();
auto it = values_.find(node);
// If a value is dead, free it.
if (node->live_range().end == use) {
// There were multiple uses in this node.
if (it == values_.end()) return;
DCHECK_NE(it, values_.end());
LiveNodeInfo& info = it->second;
// TODO(jgruber,v8:7700): Instead of looping over all register values to
// find possible references, clear register values more efficiently.
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
if (register_values_[i] != &info) continue;
register_values_[i] = nullptr;
}
// If the stack slot is a local slot, free it so it can be reused.
if (info.stack_slot != nullptr && info.stack_slot->slot.index() > 0) {
free_slots_.Add(info.stack_slot);
}
values_.erase(it);
return;
}
// Otherwise update the next use.
DCHECK_NE(it, values_.end());
it->second.next_use = input.next_use_id();
}
void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
for (Input& input : *node) AssignInput(input);
AssignTemporaries(node);
for (Input& input : *node) UpdateInputUseAndClearDead(node->id(), input);
if (node->properties().is_call()) SpillAndClearRegisters();
// TODO(verwaest): This isn't a good idea :)
if (node->properties().can_deopt()) SpillRegisters();
// Allocate node output.
if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
nullptr));
printing_visitor_->os() << "live regs: ";
PrintLiveRegs();
printing_visitor_->os() << "\n";
}
}
void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
LiveNodeInfo* info = MakeLive(node);
DCHECK(!node->Is<Phi>());
node->SetNoSpillOrHint();
compiler::UnallocatedOperand operand =
compiler::UnallocatedOperand::cast(node->result().operand());
if (operand.basic_policy() == compiler::UnallocatedOperand::FIXED_SLOT) {
DCHECK(node->Is<InitialValue>());
DCHECK_LT(operand.fixed_slot_index(), 0);
// Set the stack slot to exactly where the value is.
info->stack_slot = compilation_unit_->zone()->New<StackSlot>(
MachineRepresentation::kTagged, operand.fixed_slot_index());
node->result().SetAllocated(info->stack_slot->slot);
return;
}
switch (operand.extended_policy()) {
case compiler::UnallocatedOperand::FIXED_REGISTER: {
Register r = Register::from_code(operand.fixed_register_index());
node->result().SetAllocated(ForceAllocate(r, info));
break;
}
case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
node->result().SetAllocated(AllocateRegister(info));
break;
case compiler::UnallocatedOperand::SAME_AS_INPUT: {
Input& input = node->input(operand.input_index());
Register r = input.AssignedRegister();
node->result().SetAllocated(ForceAllocate(r, info));
break;
}
case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
case compiler::UnallocatedOperand::NONE:
case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
UNREACHABLE();
}
}
void StraightForwardRegisterAllocator::Free(const Register& reg,
bool try_move) {
int index = MapRegisterToIndex(reg);
LiveNodeInfo* info = register_values_[index];
// If the register is already free, return.
if (info == nullptr) return;
register_values_[index] = nullptr;
// If the value we're freeing from the register is already known to be
// assigned to a different register as well, simply return.
if (reg != info->reg) {
DCHECK_EQ(info, register_values_[MapRegisterToIndex(info->reg)]);
return;
}
info->reg = Register::no_reg();
// If the value is already spilled, return.
if (info->stack_slot != nullptr) return;
if (try_move) {
// Try to move the value to another register.
int index = -1;
int skip = MapRegisterToIndex(reg);
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
if (i == skip) continue;
if (register_values_[i] == nullptr) {
index = i;
} else if (register_values_[i]->node == info->node) {
// Found an existing register.
info->reg = MapIndexToRegister(i);
return;
}
}
// A free register was found; move the value there and emit a gap move.
if (index != -1) {
Register target_reg = MapIndexToRegister(index);
SetRegister(target_reg, info);
// Emit a gapmove.
compiler::AllocatedOperand source(compiler::LocationOperand::REGISTER,
MachineRepresentation::kTagged,
reg.code());
compiler::AllocatedOperand target(compiler::LocationOperand::REGISTER,
MachineRepresentation::kTagged,
target_reg.code());
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->os() << "gap move: ";
graph_labeller()->PrintNodeLabel(std::cout, info->node);
printing_visitor_->os()
<< ": " << target << " ← " << source << std::endl;
}
AddMoveBeforeCurrentNode(source, target);
return;
}
} else {
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
if (register_values_[i] == info) {
info->reg = MapIndexToRegister(i);
return;
}
}
}
// If all else fails, spill the value.
Spill(info);
}
void StraightForwardRegisterAllocator::InitializeConditionalBranchRegisters(
ConditionalControlNode* node, BasicBlock* target) {
if (target->is_empty_block()) {
// Jumping over an empty block, so we're in fact merging.
Jump* jump = target->control_node()->Cast<Jump>();
target = jump->target();
return MergeRegisterValues(node, target, jump->predecessor_id());
}
if (target->has_state()) {
// Not a fall-through branch, copy the state over.
return InitializeBranchTargetRegisterValues(node, target);
}
// Clear dead fall-through registers.
DCHECK_EQ(node->id() + 1, target->first_id());
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* info = register_values_[i];
if (info != nullptr && !IsLiveAtTarget(info, node, target)) {
info->reg = Register::no_reg();
register_values_[i] = nullptr;
}
}
}
void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
BasicBlock* block) {
for (Input& input : *node) AssignInput(input);
AssignTemporaries(node);
for (Input& input : *node) UpdateInputUseAndClearDead(node->id(), input);
if (node->properties().is_call()) SpillAndClearRegisters();
// Inject allocation into target phis.
if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
BasicBlock* target = unconditional->target();
if (target->has_phi()) {
Phi::List* phis = target->phis();
for (Phi* phi : *phis) {
Input& input = phi->input(block->predecessor_id());
LiveNodeInfo& info = values_[input.node()];
input.InjectAllocated(info.allocation());
}
for (Phi* phi : *phis) {
Input& input = phi->input(block->predecessor_id());
UpdateInputUseAndClearDead(node->id(), input);
}
}
}
// TODO(verwaest): This isn't a good idea :)
if (node->properties().can_deopt()) SpillRegisters();
// Merge register values. Values only flowing into phis and not being
// independently live will be killed as part of the merge.
if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
// Empty blocks are immediately merged at the control of their predecessor.
if (!block->is_empty_block()) {
MergeRegisterValues(unconditional, unconditional->target(),
block->predecessor_id());
}
} else if (auto conditional = node->TryCast<ConditionalControlNode>()) {
InitializeConditionalBranchRegisters(conditional, conditional->if_true());
InitializeConditionalBranchRegisters(conditional, conditional->if_false());
}
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
node, ProcessingState(compilation_unit_, block_it_, nullptr, nullptr,
nullptr));
}
}
void StraightForwardRegisterAllocator::TryAllocateToInput(LiveNodeInfo* info,
Phi* phi) {
DCHECK_EQ(info->node, phi);
// Try to allocate the phi to a register already used by one of its inputs.
for (Input& input : *phi) {
if (input.operand().IsRegister()) {
Register reg = input.AssignedRegister();
int index = MapRegisterToIndex(reg);
if (register_values_[index] == nullptr) {
phi->result().SetAllocated(DoAllocate(reg, info));
if (FLAG_trace_maglev_regalloc) {
Phi* phi = info->node->Cast<Phi>();
printing_visitor_->Process(
phi, ProcessingState(compilation_unit_, block_it_, nullptr,
nullptr, nullptr));
printing_visitor_->os()
<< "phi (reuse) " << input.operand() << std::endl;
}
return;
}
}
}
}
void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
compiler::AllocatedOperand source, compiler::AllocatedOperand target) {
GapMove* gap_move =
Node::New<GapMove>(compilation_unit_->zone(), {}, source, target);
if (compilation_unit_->has_graph_labeller()) {
graph_labeller()->RegisterNode(gap_move);
}
if (*node_it_ == nullptr) {
// We're at the control node, so append instead.
(*block_it_)->nodes().Add(gap_move);
node_it_ = (*block_it_)->nodes().end();
} else {
DCHECK_NE(node_it_, (*block_it_)->nodes().end());
node_it_.InsertBefore(gap_move);
}
}
void StraightForwardRegisterAllocator::Spill(LiveNodeInfo* info) {
if (info->stack_slot != nullptr) return;
AllocateSpillSlot(info);
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->os()
<< "spill: " << info->stack_slot->slot << " ← v"
<< graph_labeller()->NodeId(info->node) << std::endl;
}
info->node->Spill(info->stack_slot->slot);
}
void StraightForwardRegisterAllocator::AssignInput(Input& input) {
compiler::UnallocatedOperand operand =
compiler::UnallocatedOperand::cast(input.operand());
LiveNodeInfo* info = &values_[input.node()];
compiler::AllocatedOperand location = info->allocation();
switch (operand.extended_policy()) {
case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
input.SetAllocated(location);
break;
case compiler::UnallocatedOperand::FIXED_REGISTER: {
Register reg = Register::from_code(operand.fixed_register_index());
input.SetAllocated(ForceAllocate(reg, info));
break;
}
case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
if (location.IsAnyRegister()) {
input.SetAllocated(location);
} else {
input.SetAllocated(AllocateRegister(info));
}
break;
case compiler::UnallocatedOperand::FIXED_FP_REGISTER:
case compiler::UnallocatedOperand::SAME_AS_INPUT:
case compiler::UnallocatedOperand::NONE:
case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
UNREACHABLE();
}
compiler::AllocatedOperand allocated =
compiler::AllocatedOperand::cast(input.operand());
if (location != allocated) {
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->os()
<< "gap move: " << allocated << " ← " << location << std::endl;
}
AddMoveBeforeCurrentNode(location, allocated);
}
}
void StraightForwardRegisterAllocator::SpillRegisters() {
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* info = register_values_[i];
if (info == nullptr) continue;
Spill(info);
}
}
void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* info = register_values_[i];
if (info == nullptr) continue;
Spill(info);
info->reg = Register::no_reg();
register_values_[i] = nullptr;
}
}
void StraightForwardRegisterAllocator::AllocateSpillSlot(LiveNodeInfo* info) {
DCHECK_NULL(info->stack_slot);
StackSlot* stack_slot = free_slots_.first();
if (stack_slot == nullptr) {
// If there are no free stack slots, allocate a new one.
stack_slot = compilation_unit_->zone()->New<StackSlot>(
MachineRepresentation::kTagged, top_of_stack_++);
} else {
free_slots_.DropHead();
}
info->stack_slot = stack_slot;
}
RegList StraightForwardRegisterAllocator::GetFreeRegisters(int count) {
RegList free_registers = {};
if (count == 0) return free_registers;
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
if (register_values_[i] == nullptr) {
free_registers = CombineRegLists(free_registers,
Register::ListOf(MapIndexToRegister(i)));
if (--count == 0) return free_registers;
}
}
while (count != 0) {
// Free the register whose value has the furthest next use. Reset the
// search state on each iteration so a stale maximum cannot re-select an
// already-freed register.
int furthest_use = 0;
int longest = -1;
DCHECK_NOT_NULL(register_values_[0]);
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
if (!register_values_[i]) continue;
int use = register_values_[i]->next_use;
if (use > furthest_use) {
furthest_use = use;
longest = i;
}
}
DCHECK_NE(-1, longest);
Register reg = MapIndexToRegister(longest);
Free(reg, false);
free_registers = CombineRegLists(free_registers, Register::ListOf(reg));
count--;
}
return free_registers;
}
compiler::AllocatedOperand StraightForwardRegisterAllocator::AllocateRegister(
LiveNodeInfo* info) {
compiler::InstructionOperand allocation = TryAllocateRegister(info);
if (allocation.IsAllocated()) {
return compiler::AllocatedOperand::cast(allocation);
}
// Free some register.
int furthest = 0;
DCHECK_NOT_NULL(register_values_[0]);
for (int i = 1; i < kAllocatableGeneralRegisterCount; i++) {
DCHECK_NOT_NULL(register_values_[i]);
if (register_values_[furthest]->next_use < register_values_[i]->next_use) {
furthest = i;
}
}
return ForceAllocate(MapIndexToRegister(furthest), info, false);
}
compiler::AllocatedOperand StraightForwardRegisterAllocator::ForceAllocate(
const Register& reg, LiveNodeInfo* info, bool try_move) {
if (register_values_[MapRegisterToIndex(reg)] == info) {
return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
MachineRepresentation::kTagged,
reg.code());
}
Free(reg, try_move);
DCHECK_NULL(register_values_[MapRegisterToIndex(reg)]);
return DoAllocate(reg, info);
}
compiler::AllocatedOperand StraightForwardRegisterAllocator::DoAllocate(
const Register& reg, LiveNodeInfo* info) {
SetRegister(reg, info);
return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
MachineRepresentation::kTagged, reg.code());
}
void StraightForwardRegisterAllocator::SetRegister(Register reg,
LiveNodeInfo* info) {
int index = MapRegisterToIndex(reg);
DCHECK_IMPLIES(register_values_[index] != info,
register_values_[index] == nullptr);
register_values_[index] = info;
info->reg = reg;
}
compiler::InstructionOperand
StraightForwardRegisterAllocator::TryAllocateRegister(LiveNodeInfo* info) {
int index = -1;
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
if (register_values_[i] == nullptr) {
index = i;
break;
}
}
// Allocation failed.
if (index == -1) return compiler::InstructionOperand();
// A free register was found; record the new allocation in the register state.
SetRegister(MapIndexToRegister(index), info);
return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
MachineRepresentation::kTagged,
MapIndexToRegister(index).code());
}
void StraightForwardRegisterAllocator::AssignTemporaries(NodeBase* node) {
node->assign_temporaries(GetFreeRegisters(node->num_temporaries_needed()));
}
void StraightForwardRegisterAllocator::InitializeRegisterValues(
RegisterState* target_state) {
// First clear the register state.
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* info = register_values_[i];
if (info == nullptr) continue;
info->reg = Register::no_reg();
register_values_[i] = nullptr;
}
// Then fill it in with target information.
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* node;
RegisterMerge* merge;
LoadMergeState(target_state[i], &node, &merge);
if (node == nullptr) {
DCHECK(!target_state[i].GetPayload().is_merge);
continue;
}
register_values_[i] = node;
node->reg = MapIndexToRegister(i);
}
}
void StraightForwardRegisterAllocator::EnsureInRegister(
RegisterState* target_state, LiveNodeInfo* incoming) {
#ifdef DEBUG
int i;
for (i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* node;
RegisterMerge* merge;
LoadMergeState(target_state[i], &node, &merge);
if (node == incoming) break;
}
CHECK_NE(kAllocatableGeneralRegisterCount, i);
#endif
}
void StraightForwardRegisterAllocator::InitializeBranchTargetRegisterValues(
ControlNode* source, BasicBlock* target) {
RegisterState* target_state = target->state()->register_state();
DCHECK(!target_state[0].GetPayload().is_initialized);
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* info = register_values_[i];
if (!IsLiveAtTarget(info, source, target)) info = nullptr;
target_state[i] = {info, initialized_node};
}
}
void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
BasicBlock* target,
int predecessor_id) {
RegisterState* target_state = target->state()->register_state();
if (!target_state[0].GetPayload().is_initialized) {
// This is the first block we're merging, initialize the values.
return InitializeBranchTargetRegisterValues(control, target);
}
int predecessor_count = target->state()->predecessor_count();
for (int i = 0; i < kAllocatableGeneralRegisterCount; i++) {
LiveNodeInfo* node;
RegisterMerge* merge;
LoadMergeState(target_state[i], &node, &merge);
compiler::AllocatedOperand register_info = {
compiler::LocationOperand::REGISTER, MachineRepresentation::kTagged,
MapIndexToRegister(i).code()};
LiveNodeInfo* incoming = register_values_[i];
if (!IsLiveAtTarget(incoming, control, target)) incoming = nullptr;
if (incoming == node) {
// We're using the same register as the target already has. If registers
// are merged, add input information.
if (merge) merge->operand(predecessor_id) = register_info;
continue;
}
if (merge) {
// The register is already occupied with a different node. Figure out
// where that node is allocated on the incoming branch.
merge->operand(predecessor_id) = node->allocation();
// If there's a value in the incoming state, that value is either
// already spilled or in another place in the merge state.
if (incoming != nullptr && incoming->stack_slot != nullptr) {
EnsureInRegister(target_state, incoming);
}
continue;
}
DCHECK_IMPLIES(node == nullptr, incoming != nullptr);
if (node == nullptr && incoming->stack_slot == nullptr) {
// If the register is unallocated at the merge point, and the incoming
// value isn't spilled, that means we must have seen it already in a
// different register.
EnsureInRegister(target_state, incoming);
continue;
}
const size_t size = sizeof(RegisterMerge) +
predecessor_count * sizeof(compiler::AllocatedOperand);
void* buffer = compilation_unit_->zone()->Allocate<void*>(size);
merge = new (buffer) RegisterMerge();
merge->node = node == nullptr ? incoming : node;
// If the register is unallocated at the merge point, allocation so far
// is the spill slot for the incoming value. Otherwise all incoming
// branches agree that the current node is in the register info.
compiler::AllocatedOperand info_so_far =
node == nullptr ? incoming->stack_slot->slot : register_info;
// Initialize the entire array with info_so_far since we don't know in
// which order we've seen the predecessors so far. Predecessors we
// haven't seen yet will simply overwrite their entry later.
for (int j = 0; j < predecessor_count; j++) {
merge->operand(j) = info_so_far;
}
// If the register is unallocated at the merge point, fill in the
// incoming value. Otherwise find the merge-point node in the incoming
// state.
if (node == nullptr) {
merge->operand(predecessor_id) = register_info;
} else {
merge->operand(predecessor_id) = node->allocation();
}
target_state[i] = {merge, initialized_merge};
}
}
} // namespace maglev
} // namespace internal
} // namespace v8
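
The allocation strategy above is intentionally simple: when no register is free, AllocateRegister and GetFreeRegisters evict the value whose next use is furthest away. Below is a minimal stand-alone sketch of that policy; the names (sketch::Value, PickRegister, kNumRegisters) are illustrative and are not the classes used in the allocator above.

#include <array>
#include <cstdint>
#include <optional>

namespace sketch {

struct Value {
  uint32_t next_use;  // Position of this value's next use in the instruction stream.
};

constexpr int kNumRegisters = 4;
using RegisterFile = std::array<std::optional<Value>, kNumRegisters>;

// Returns the register to hand out: a free one if available, otherwise the
// register whose current value has the furthest next use (the caller is
// expected to spill that value first).
int PickRegister(const RegisterFile& regs) {
  int victim = 0;
  for (int i = 0; i < kNumRegisters; ++i) {
    if (!regs[i].has_value()) return i;
    if (regs[i]->next_use > regs[victim]->next_use) victim = i;
  }
  return victim;
}

}  // namespace sketch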

View File

@ -0,0 +1,130 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_REGALLOC_H_
#define V8_MAGLEV_MAGLEV_REGALLOC_H_
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
class MaglevPrintingVisitor;
struct StackSlot {
StackSlot(MachineRepresentation representation, int index)
: slot(compiler::LocationOperand::STACK_SLOT, representation, index) {}
compiler::AllocatedOperand slot;
StackSlot* next_ = nullptr;
StackSlot** next() { return &next_; }
};
struct LiveNodeInfo {
ValueNode* node;
uint32_t last_use = 0;
uint32_t next_use = 0;
StackSlot* stack_slot = nullptr;
Register reg = Register::no_reg();
compiler::AllocatedOperand allocation() const {
if (reg.is_valid()) {
return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
MachineRepresentation::kTagged,
reg.code());
}
DCHECK_NOT_NULL(stack_slot);
return stack_slot->slot;
}
};
class StraightForwardRegisterAllocator {
public:
StraightForwardRegisterAllocator(MaglevCompilationUnit* compilation_unit,
Graph* graph);
~StraightForwardRegisterAllocator();
int stack_slots() const { return top_of_stack_; }
private:
std::vector<int> future_register_uses_[kAllocatableGeneralRegisterCount];
// Currently live values.
std::map<ValueNode*, LiveNodeInfo> values_;
base::ThreadedList<StackSlot> free_slots_;
int top_of_stack_ = 0;
#define N(V) nullptr,
LiveNodeInfo* register_values_[kAllocatableGeneralRegisterCount] = {
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(N)};
#undef N
LiveNodeInfo* MakeLive(ValueNode* node) {
uint32_t last_use = node->live_range().end;
// TODO(verwaest): We don't currently have next_use info...
uint32_t next_use = node->next_use();
return &(values_[node] = {node, last_use, next_use});
}
void ComputePostDominatingHoles(Graph* graph);
void AllocateRegisters(Graph* graph);
void PrintLiveRegs() const;
// Update use info and clear now dead registers.
void UpdateInputUseAndClearDead(uint32_t use, const Input& input);
void AllocateControlNode(ControlNode* node, BasicBlock* block);
void AllocateNode(Node* node);
void AllocateNodeResult(ValueNode* node);
void AssignInput(Input& input);
void AssignTemporaries(NodeBase* node);
void TryAllocateToInput(LiveNodeInfo* info, Phi* phi);
RegList GetFreeRegisters(int count);
void AddMoveBeforeCurrentNode(compiler::AllocatedOperand source,
compiler::AllocatedOperand target);
void AllocateSpillSlot(LiveNodeInfo* info);
void Spill(LiveNodeInfo* info);
void SpillAndClearRegisters();
void SpillRegisters();
compiler::AllocatedOperand AllocateRegister(LiveNodeInfo* info);
compiler::AllocatedOperand ForceAllocate(const Register& reg,
LiveNodeInfo* info,
bool try_move = true);
compiler::AllocatedOperand DoAllocate(const Register& reg,
LiveNodeInfo* info);
void SetRegister(Register reg, LiveNodeInfo* info);
void Free(const Register& reg, bool try_move);
compiler::InstructionOperand TryAllocateRegister(LiveNodeInfo* info);
void InitializeRegisterValues(RegisterState* target_state);
void EnsureInRegister(RegisterState* target_state, LiveNodeInfo* incoming);
void InitializeBranchTargetRegisterValues(ControlNode* source,
BasicBlock* target);
void InitializeConditionalBranchRegisters(ConditionalControlNode* source,
BasicBlock* target);
void MergeRegisterValues(ControlNode* control, BasicBlock* target,
int predecessor_id);
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
}
MaglevCompilationUnit* compilation_unit_;
std::unique_ptr<MaglevPrintingVisitor> printing_visitor_;
BlockConstIterator block_it_;
NodeIterator node_it_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_REGALLOC_H_
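
A sketch of the spill-slot bookkeeping declared above (the free_slots_ list plus top_of_stack_), under made-up names: slots freed when a value dies are reused before the frame grows. SpillSlotPool is illustrative only, not a class in this commit.

#include <vector>

namespace sketch {

// Stand-in for the allocator's free_slots_ / top_of_stack_ pair.
class SpillSlotPool {
 public:
  // Hand out a slot index, preferring a previously freed slot.
  int Allocate() {
    if (!free_.empty()) {
      int slot = free_.back();
      free_.pop_back();
      return slot;
    }
    return top_of_stack_++;  // No reusable slot: grow the frame.
  }

  // Called when the value living in `slot` dies; the real allocator only
  // recycles local slots (positive indices), not fixed parameter slots.
  void Release(int slot) {
    if (slot > 0) free_.push_back(slot);
  }

  int frame_size() const { return top_of_stack_; }

 private:
  std::vector<int> free_;
  int top_of_stack_ = 0;
};

}  // namespace sketch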

View File

@ -0,0 +1,113 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
#define V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace maglev {
// Vector of values associated with a bytecode's register frame. Indexable by
// interpreter register.
template <typename T>
class RegisterFrameArray {
public:
explicit RegisterFrameArray(const MaglevCompilationUnit& info) {
// The first local is at index zero, parameters are behind it with
// negative indices, and the unoptimized frame header is between the two,
// so the entire frame state including parameters is the distance from the
// last parameter to the last local frame register, plus one to include both
// ends.
interpreter::Register last_local =
interpreter::Register(info.register_count() - 1);
interpreter::Register last_param =
interpreter::Register::FromParameterIndex(info.parameter_count() - 1);
DCHECK_LT(last_param.index(), 0);
T* frame =
info.zone()->NewArray<T>(last_local.index() - last_param.index() + 1);
// Set frame_start_ to a "butterfly" pointer into the middle of the above
// Zone-allocated array. Parameters are at a negative index, so we have to
// subtract it from the above frame pointer.
frame_start_ = frame - last_param.index();
}
// Disallow copy (use CopyFrom instead).
RegisterFrameArray(const RegisterFrameArray& other) V8_NOEXCEPT = delete;
RegisterFrameArray& operator=(const RegisterFrameArray& other)
V8_NOEXCEPT = delete;
// Allow move.
RegisterFrameArray(RegisterFrameArray&& other) V8_NOEXCEPT = default;
RegisterFrameArray& operator=(RegisterFrameArray&& other)
V8_NOEXCEPT = default;
void CopyFrom(const MaglevCompilationUnit& info,
const RegisterFrameArray& other,
const compiler::BytecodeLivenessState* liveness) {
interpreter::Register last_param =
interpreter::Register::FromParameterIndex(info.parameter_count() - 1);
int end = 1;
if (!liveness) {
interpreter::Register last_local =
interpreter::Register(info.register_count() - 1);
end = last_local.index();
}
// All parameters are live.
for (int index = last_param.index(); index <= end; ++index) {
interpreter::Register reg(index);
(*this)[reg] = other[reg];
}
if (liveness) {
for (int index : *liveness) {
interpreter::Register reg(index);
(*this)[reg] = other[reg];
}
}
}
T& operator[](interpreter::Register reg) { return frame_start_[reg.index()]; }
const T& operator[](interpreter::Register reg) const {
return frame_start_[reg.index()];
}
private:
static int DataSize(int register_count, int parameter_count) {
// The first local is at index zero, parameters are behind it with
// negative indices, and the unoptimized frame header is between the two,
// so the entire frame state including parameters is the distance from the
// last parameter to the last local frame register, plus one to include both
// ends.
interpreter::Register last_local =
interpreter::Register(register_count - 1);
interpreter::Register last_param =
interpreter::Register::FromParameterIndex(parameter_count - 1);
return last_local.index() - last_param.index() + 1;
}
T* data_begin(int parameter_count) const {
return frame_start_ +
interpreter::Register::FromParameterIndex(parameter_count - 1)
.index();
}
// Butterfly pointer for registers, pointing into the middle of a
// Zone-allocated Node array.
// |
// v
// [Parameters] [Unoptimized Frame Header] [Locals]
T* frame_start_ = nullptr;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
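
The "butterfly" pointer described above can be illustrated with a tiny self-contained example using plain ints and a simplified index convention (two parameters at -2 and -1, no frame header), rather than real interpreter registers.

#include <cstdio>
#include <vector>

int main() {
  const int kNumParams = 2;  // Illustrative: addressed as indices -2 and -1.
  const int kNumLocals = 3;  // Addressed as indices 0, 1 and 2.
  std::vector<int> storage(kNumParams + kNumLocals, 0);

  // Butterfly pointer: points at local 0, parameters sit behind it.
  int* frame_start = storage.data() + kNumParams;

  frame_start[-2] = 7;  // First parameter.
  frame_start[0] = 42;  // First local.
  std::printf("param0=%d local0=%d\n", frame_start[-2], frame_start[0]);
  return 0;
}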

View File

@ -0,0 +1,57 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
#define V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
class ProcessingState;
class MaglevVregAllocationState {
public:
int AllocateVirtualRegister() { return next_virtual_register_++; }
int num_allocated_registers() const { return next_virtual_register_; }
private:
int next_virtual_register_ = 0;
};
class MaglevVregAllocator {
public:
static constexpr bool kNeedsCheckpointStates = true;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
for (BasicBlock* block : *graph) {
if (!block->has_phi()) continue;
for (Phi* phi : *block->phis()) {
phi->AllocateVregInPostProcess(&state_);
}
}
}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
#define DEF_PROCESS_NODE(NAME) \
void Process(NAME* node, const ProcessingState& state) { \
node->AllocateVreg(&state_, state); \
}
NODE_BASE_LIST(DEF_PROCESS_NODE)
#undef DEF_PROCESS_NODE
private:
MaglevVregAllocationState state_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
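
The DEF_PROCESS_NODE block above is a standard X-macro expansion: NODE_BASE_LIST invokes the macro once per IR node type, stamping out an overload of Process for each. The stand-alone version below shows the same trick with a made-up two-entry node list (DEMO_NODE_LIST, Add, Load are hypothetical).

#include <cstdio>

// Hypothetical node list; the real NODE_BASE_LIST enumerates all Maglev IR
// node types.
#define DEMO_NODE_LIST(V) \
  V(Add)                  \
  V(Load)

struct Add {};
struct Load {};

class DemoProcessor {
 public:
#define DEF_PROCESS_NODE(NAME) \
  void Process(NAME*) { std::printf("Process(%s*)\n", #NAME); }
  DEMO_NODE_LIST(DEF_PROCESS_NODE)
#undef DEF_PROCESS_NODE
};

int main() {
  Add add;
  Load load;
  DemoProcessor p;
  p.Process(&add);   // Resolves to the Add overload.
  p.Process(&load);  // Resolves to the Load overload.
  return 0;
}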

37
src/maglev/maglev.cc Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev.h"
#include "src/common/globals.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compiler.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/shared-function-info-inl.h"
namespace v8 {
namespace internal {
MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
Handle<JSFunction> function) {
CanonicalHandleScope canonical_handle_scope(isolate);
Zone broker_zone(isolate->allocator(), "maglev-broker-zone");
compiler::JSHeapBroker broker(isolate, &broker_zone, FLAG_trace_heap_broker,
CodeKind::MAGLEV);
compiler::CompilationDependencies* deps =
broker_zone.New<compiler::CompilationDependencies>(&broker, &broker_zone);
USE(deps); // The deps register themselves in the heap broker.
broker.SetTargetNativeContextRef(handle(function->native_context(), isolate));
broker.InitializeAndStartSerializing();
broker.StopSerializing();
maglev::MaglevCompiler compiler(&broker, function);
return ToCodeT(compiler.Compile(), isolate);
}
} // namespace internal
} // namespace v8

32
src/maglev/maglev.h Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_H_
#define V8_MAGLEV_MAGLEV_H_
// TODO(v8:7700): Remove all references to V8_ENABLE_MAGLEV once maglev ships.
#ifndef V8_ENABLE_MAGLEV
// Let's explicitly avoid accidental includes for now.
#error Maglev should be enabled.
#endif // V8_ENABLE_MAGLEV
#include "src/handles/handles.h"
namespace v8 {
namespace internal {
class Isolate;
class JSFunction;
class Maglev : public AllStatic {
public:
static MaybeHandle<CodeT> Compile(Isolate* isolate,
Handle<JSFunction> function);
};
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_H_

View File

@ -605,6 +605,8 @@ inline bool Code::is_turbofanned() const {
return IsTurbofannedField::decode(flags);
}
bool Code::is_maglevved() const { return kind() == CodeKind::MAGLEV; }
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
int32_t flags =
@ -672,7 +674,7 @@ void Code::set_inlined_bytecode_size(unsigned size) {
}
bool Code::uses_safepoint_table() const {
return is_turbofanned() || is_wasm_code();
return is_turbofanned() || is_maglevved() || is_wasm_code();
}
int Code::stack_slots() const {

View File

@ -28,6 +28,7 @@ namespace internal {
V(C_WASM_ENTRY) \
V(INTERPRETED_FUNCTION) \
V(BASELINE) \
V(MAGLEV) \
V(TURBOFAN)
enum class CodeKind {
@ -62,12 +63,16 @@ inline constexpr bool CodeKindIsUnoptimizedJSFunction(CodeKind kind) {
}
inline constexpr bool CodeKindIsOptimizedJSFunction(CodeKind kind) {
return kind == CodeKind::TURBOFAN;
STATIC_ASSERT(static_cast<int>(CodeKind::MAGLEV) + 1 ==
static_cast<int>(CodeKind::TURBOFAN));
return base::IsInRange(kind, CodeKind::MAGLEV, CodeKind::TURBOFAN);
}
inline constexpr bool CodeKindIsJSFunction(CodeKind kind) {
return CodeKindIsUnoptimizedJSFunction(kind) ||
CodeKindIsOptimizedJSFunction(kind);
STATIC_ASSERT(static_cast<int>(CodeKind::BASELINE) + 1 ==
static_cast<int>(CodeKind::MAGLEV));
return base::IsInRange(kind, CodeKind::INTERPRETED_FUNCTION,
CodeKind::TURBOFAN);
}
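
The STATIC_ASSERTs added above are what make the base::IsInRange shortcut safe: the range check is only equivalent to listing the kinds individually while the enumerators stay adjacent and in order. A minimal stand-alone version of the pattern, with a made-up DemoKind enum:

enum class DemoKind { kInterpreted, kBaseline, kMaglev, kTurbofan };

// Only valid while kMaglev and kTurbofan remain adjacent and ordered, which
// the static_assert pins down (mirroring the STATIC_ASSERTs above).
constexpr bool DemoKindIsOptimized(DemoKind kind) {
  static_assert(static_cast<int>(DemoKind::kMaglev) + 1 ==
                static_cast<int>(DemoKind::kTurbofan));
  return kind >= DemoKind::kMaglev && kind <= DemoKind::kTurbofan;
}

static_assert(DemoKindIsOptimized(DemoKind::kMaglev));
static_assert(!DemoKindIsOptimized(DemoKind::kBaseline));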
inline constexpr bool CodeKindIsBuiltinOrJSFunction(CodeKind kind) {

View File

@ -436,6 +436,10 @@ class Code : public HeapObject {
// TurboFan optimizing compiler.
inline bool is_turbofanned() const;
// TODO(jgruber): Reconsider these predicates; we should probably merge them
// and rename to something appropriate.
inline bool is_maglevved() const;
// [can_have_weak_objects]: If CodeKindIsOptimizedJSFunction(kind), tells
// whether the embedded objects in code should be treated weakly.
inline bool can_have_weak_objects() const;

View File

@ -5,12 +5,12 @@
type OptimizationMarker extends uint16 constexpr 'OptimizationMarker';
bitfield struct FeedbackVectorFlags extends uint32 {
optimization_marker: OptimizationMarker: 2 bit;
optimization_marker: OptimizationMarker: 3 bit;
// Whether the maybe_optimized_code field contains a code object. 'maybe',
// because the flag may lag behind the actual state of the world (it will be
// updated in time).
maybe_has_optimized_code: bool: 1 bit;
all_your_bits_are_belong_to_jgruber: uint32: 29 bit;
all_your_bits_are_belong_to_jgruber: uint32: 28 bit;
}
@generateBodyDescriptor
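
The bitfield widening above follows from simple counting; a sketch of the arithmetic (illustrative static_asserts, not V8 code):

// A 2-bit marker field holds only four values; the Maglev markers introduced
// in this commit push the marker count past that, so the field grows to
// 3 bits and the spare field shrinks by one bit to keep the struct at 32 bits.
static_assert(2 + 1 + 29 == 32, "old FeedbackVectorFlags layout");
static_assert(3 + 1 + 28 == 32, "new FeedbackVectorFlags layout");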

View File

@ -79,44 +79,10 @@ void JSFunction::SetInterruptBudget() {
}
}
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
Isolate* isolate = GetIsolate();
if (!isolate->concurrent_recompilation_enabled() ||
isolate->bootstrapper()->IsActive()) {
mode = ConcurrencyMode::kNotConcurrent;
}
DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsBaseline());
DCHECK(!ActiveTierIsTurbofan());
DCHECK(shared().HasBytecodeArray());
DCHECK(shared().allows_lazy_compilation() ||
!shared().optimization_disabled());
if (mode == ConcurrencyMode::kConcurrent) {
if (IsInOptimizationQueue()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Not marking ");
ShortPrint();
PrintF(" -- already in optimization queue.\n");
}
return;
}
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
ShortPrint();
PrintF(" for concurrent recompilation.\n");
}
}
SetOptimizationMarker(
mode == ConcurrencyMode::kConcurrent
? OptimizationMarker::kCompileTurbofan_Concurrent
: OptimizationMarker::kCompileTurbofan_NotConcurrent);
}
bool JSFunction::IsInOptimizationQueue() {
if (!has_feedback_vector()) return false;
return IsInOptimizationQueueMarker(feedback_vector().optimization_marker());
return feedback_vector().optimization_marker() ==
OptimizationMarker::kInOptimizationQueue;
}
void JSFunction::CompleteInobjectSlackTrackingIfActive() {

View File

@ -117,6 +117,7 @@ base::Optional<CodeKind> JSFunction::GetActiveTier() const {
#ifdef DEBUG
CHECK(highest_tier == CodeKind::TURBOFAN ||
highest_tier == CodeKind::BASELINE ||
highest_tier == CodeKind::MAGLEV ||
highest_tier == CodeKind::INTERPRETED_FUNCTION);
if (highest_tier == CodeKind::INTERPRETED_FUNCTION) {
@ -143,7 +144,9 @@ bool JSFunction::ActiveTierIsBaseline() const {
return GetActiveTier() == CodeKind::BASELINE;
}
CodeKind JSFunction::NextTier() const { return CodeKind::TURBOFAN; }
bool JSFunction::ActiveTierIsMaglev() const {
return GetActiveTier() == CodeKind::MAGLEV;
}
bool JSFunction::CanDiscardCompiled() const {
// Essentially, what we are asking here is, has this function been compiled
@ -160,6 +163,62 @@ bool JSFunction::CanDiscardCompiled() const {
return (result & kJSFunctionCodeKindsMask) != 0;
}
namespace {
constexpr OptimizationMarker OptimizationMarkerFor(CodeKind target_kind,
ConcurrencyMode mode) {
DCHECK(target_kind == CodeKind::MAGLEV || target_kind == CodeKind::TURBOFAN);
return target_kind == CodeKind::MAGLEV
? (mode == ConcurrencyMode::kConcurrent
? OptimizationMarker::kCompileMaglev_Concurrent
: OptimizationMarker::kCompileMaglev_NotConcurrent)
: (mode == ConcurrencyMode::kConcurrent
? OptimizationMarker::kCompileTurbofan_Concurrent
: OptimizationMarker::kCompileTurbofan_NotConcurrent);
}
} // namespace
void JSFunction::MarkForOptimization(Isolate* isolate, CodeKind target_kind,
ConcurrencyMode mode) {
if (!isolate->concurrent_recompilation_enabled() ||
isolate->bootstrapper()->IsActive()) {
mode = ConcurrencyMode::kNotConcurrent;
}
DCHECK(CodeKindIsOptimizedJSFunction(target_kind));
DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsBaseline() ||
ActiveTierIsMaglev());
DCHECK(!ActiveTierIsTurbofan());
DCHECK(shared().HasBytecodeArray());
DCHECK(shared().allows_lazy_compilation() ||
!shared().optimization_disabled());
if (mode == ConcurrencyMode::kConcurrent) {
if (IsInOptimizationQueue()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Not marking ");
ShortPrint();
PrintF(" -- already in optimization queue.\n");
}
return;
}
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
ShortPrint();
PrintF(" for concurrent %s recompilation.\n",
CodeKindToString(target_kind));
}
}
SetOptimizationMarker(OptimizationMarkerFor(target_kind, mode));
}
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
Isolate* isolate = GetIsolate();
MarkForOptimization(isolate, CodeKind::TURBOFAN, mode);
}
// static
MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
Handle<JSBoundFunction> function) {

View File

@ -129,8 +129,7 @@ class JSFunction
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsBaseline() const;
CodeKind NextTier() const;
bool ActiveTierIsMaglev() const;
// Similar to SharedFunctionInfo::CanDiscardCompiled. Returns true, if the
// attached code can be recreated at a later point by replacing it with
@ -146,7 +145,10 @@ class JSFunction
// Mark this function for lazy recompilation. The function will be recompiled
// the next time it is executed.
inline void MarkForOptimization(ConcurrencyMode mode);
void MarkForOptimization(Isolate* isolate, CodeKind target_kind,
ConcurrencyMode mode);
// TODO(v8:7700): Remove this function and pass the CodeKind explicitly.
void MarkForOptimization(ConcurrencyMode mode);
// Tells whether or not the function is already marked for lazy recompilation.
inline bool IsMarkedForOptimization();

View File

@ -29,16 +29,16 @@ namespace internal {
namespace {
Object CompileTurbofan(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode) {
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
CodeKind target_kind, ConcurrencyMode mode) {
StackLimitCheck check(isolate);
// Concurrent optimization runs on another thread, thus no additional gap.
const int stack_gap = mode == ConcurrencyMode::kConcurrent
? 0
: kStackSpaceRequiredForCompilation * KB;
if (check.JsHasOverflowed(stack_gap)) return isolate->StackOverflow();
const int gap = mode == ConcurrencyMode::kConcurrent
? 0
: kStackSpaceRequiredForCompilation * KB;
if (check.JsHasOverflowed(gap)) return isolate->StackOverflow();
Compiler::CompileOptimized(isolate, function, mode, function->NextTier());
Compiler::CompileOptimized(isolate, function, mode, target_kind);
// As a post-condition of CompileOptimized, the function *must* be compiled,
// i.e. the installed Code object must not be the CompileLazy builtin.
@ -92,18 +92,36 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
return baseline_code;
}
RUNTIME_FUNCTION(Runtime_CompileMaglev_Concurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
return CompileOptimized(isolate, function, CodeKind::MAGLEV,
ConcurrencyMode::kConcurrent);
}
RUNTIME_FUNCTION(Runtime_CompileMaglev_NotConcurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
return CompileOptimized(isolate, function, CodeKind::MAGLEV,
ConcurrencyMode::kNotConcurrent);
}
RUNTIME_FUNCTION(Runtime_CompileTurbofan_Concurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
return CompileTurbofan(isolate, function, ConcurrencyMode::kConcurrent);
return CompileOptimized(isolate, function, CodeKind::TURBOFAN,
ConcurrencyMode::kConcurrent);
}
RUNTIME_FUNCTION(Runtime_CompileTurbofan_NotConcurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
return CompileTurbofan(isolate, function, ConcurrencyMode::kNotConcurrent);
return CompileOptimized(isolate, function, CodeKind::TURBOFAN,
ConcurrencyMode::kNotConcurrent);
}
RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {

View File

@ -37,6 +37,10 @@
#include "src/snapshot/snapshot.h"
#include "src/web-snapshot/web-snapshot.h"
#ifdef V8_ENABLE_MAGLEV
#include "src/maglev/maglev.h"
#endif // V8_ENABLE_MAGLEV
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
#endif // V8_ENABLE_WEBASSEMBLY
@ -216,11 +220,14 @@ RUNTIME_FUNCTION(Runtime_IsAtomicsWaitAllowed) {
namespace {
enum class TierupKind { kTierupBytecode, kTierupBytecodeOrMidTier };
template <CodeKind code_kind>
bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
TierupKind tierup_kind,
IsCompiledScope* is_compiled_scope) {
IsCompiledScope* is_compiled_scope);
template <>
bool CanOptimizeFunction<CodeKind::TURBOFAN>(
Handle<JSFunction> function, Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
// The following conditions were lifted (in part) from the DCHECK inside
// JSFunction::MarkForOptimization().
@ -252,8 +259,7 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
}
CodeKind kind = CodeKindForTopTier();
if ((tierup_kind == TierupKind::kTierupBytecode &&
function->HasAvailableOptimizedCode()) ||
if (function->HasAvailableOptimizedCode() ||
function->HasAvailableCodeKind(kind)) {
DCHECK(function->HasAttachedOptimizedCode() ||
function->ChecksOptimizationMarker());
@ -266,8 +272,23 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
return true;
}
Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
TierupKind tierup_kind) {
#ifdef V8_ENABLE_MAGLEV
template <>
bool CanOptimizeFunction<CodeKind::MAGLEV>(Handle<JSFunction> function,
Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
if (!FLAG_maglev) return false;
CHECK(!IsAsmWasmFunction(isolate, *function));
// TODO(v8:7700): Disabled optimization due to deopts?
// TODO(v8:7700): Already cached?
return function->GetActiveTier() < CodeKind::MAGLEV;
}
#endif // V8_ENABLE_MAGLEV
Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate) {
if (args.length() != 1 && args.length() != 2) {
return CrashUnlessFuzzing(isolate);
}
@ -276,10 +297,11 @@ Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
static constexpr CodeKind kCodeKind = CodeKind::TURBOFAN;
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate));
if (!CanOptimizeFunction(function, isolate, tierup_kind,
&is_compiled_scope)) {
if (!CanOptimizeFunction<kCodeKind>(function, isolate, &is_compiled_scope)) {
return ReadOnlyRoots(isolate).undefined_value();
}
@ -296,9 +318,8 @@ Object OptimizeFunctionOnNextCall(RuntimeArguments& args, Isolate* isolate,
if (FLAG_trace_opt) {
PrintF("[manually marking ");
function->ShortPrint();
PrintF(" for %s optimization]\n",
concurrency_mode == ConcurrencyMode::kConcurrent ? "concurrent"
: "non-concurrent");
PrintF(" for %s %s optimization]\n", ToString(concurrency_mode),
CodeKindToString(kCodeKind));
}
// This function may not have been lazily compiled yet, even though its shared
@ -378,9 +399,80 @@ RUNTIME_FUNCTION(Runtime_CompileBaseline) {
return *function;
}
// TODO(v8:7700): Remove this function once we no longer need it to measure
// maglev compile times. For normal tierup, OptimizeMaglevOnNextCall should be
// used instead.
#ifdef V8_ENABLE_MAGLEV
RUNTIME_FUNCTION(Runtime_BenchMaglev) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_SMI_ARG_CHECKED(count, 1);
Handle<CodeT> codet;
base::ElapsedTimer timer;
timer.Start();
codet = Maglev::Compile(isolate, function).ToHandleChecked();
for (int i = 1; i < count; ++i) {
HandleScope handle_scope(isolate);
Maglev::Compile(isolate, function);
}
PrintF("Maglev compile time: %g ms!\n",
timer.Elapsed().InMillisecondsF() / count);
function->set_code(*codet);
return ReadOnlyRoots(isolate).undefined_value();
}
#else
RUNTIME_FUNCTION(Runtime_BenchMaglev) {
PrintF("Maglev is not enabled.\n");
return ReadOnlyRoots(isolate).undefined_value();
}
#endif // V8_ENABLE_MAGLEV
#ifdef V8_ENABLE_MAGLEV
RUNTIME_FUNCTION(Runtime_OptimizeMaglevOnNextCall) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
static constexpr CodeKind kCodeKind = CodeKind::MAGLEV;
IsCompiledScope is_compiled_scope(
function->shared().is_compiled_scope(isolate));
if (!CanOptimizeFunction<kCodeKind>(function, isolate, &is_compiled_scope)) {
return ReadOnlyRoots(isolate).undefined_value();
}
DCHECK(is_compiled_scope.is_compiled());
DCHECK(function->is_compiled());
// TODO(v8:7700): Support concurrent compiles.
const ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
if (FLAG_trace_opt) {
PrintF("[manually marking ");
function->ShortPrint();
PrintF(" for %s %s optimization]\n", ToString(concurrency_mode),
CodeKindToString(kCodeKind));
}
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(isolate, kCodeKind, concurrency_mode);
return ReadOnlyRoots(isolate).undefined_value();
}
#else
RUNTIME_FUNCTION(Runtime_OptimizeMaglevOnNextCall) {
PrintF("Maglev is not enabled.\n");
return ReadOnlyRoots(isolate).undefined_value();
}
#endif // V8_ENABLE_MAGLEV
// TODO(jgruber): Rename to OptimizeTurbofanOnNextCall.
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
return OptimizeFunctionOnNextCall(args, isolate, TierupKind::kTierupBytecode);
return OptimizeFunctionOnNextCall(args, isolate);
}
RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {

View File

@ -107,6 +107,8 @@ namespace internal {
F(CompileForOnStackReplacement, 0, 1) \
F(CompileLazy, 1, 1) \
F(CompileBaseline, 1, 1) \
F(CompileMaglev_Concurrent, 1, 1) \
F(CompileMaglev_NotConcurrent, 1, 1) \
F(CompileTurbofan_Concurrent, 1, 1) \
F(CompileTurbofan_NotConcurrent, 1, 1) \
F(InstallBaselineCode, 1, 1) \
@ -467,6 +469,7 @@ namespace internal {
F(ArrayIteratorProtector, 0, 1) \
F(ArraySpeciesProtector, 0, 1) \
F(BaselineOsr, -1, 1) \
F(BenchMaglev, 2, 1) \
F(ClearFunctionFeedback, 1, 1) \
F(ClearMegamorphicStubCache, 0, 1) \
F(CompleteInobjectSlackTracking, 1, 1) \
@ -529,6 +532,7 @@ namespace internal {
F(NeverOptimizeFunction, 1, 1) \
F(NewRegExpWithBacktrackLimit, 3, 1) \
F(NotifyContextDisposed, 0, 1) \
F(OptimizeMaglevOnNextCall, 1, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(OptimizeOsr, -1, 1) \
F(PrepareFunctionForOptimization, -1, 1) \

View File

@ -48,7 +48,7 @@ Running test: testErrorStackWithRuntimeEnabled
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js
@ -209,10 +209,10 @@ Running test: testErrorStackWithRuntimeEnabled
}
[23] : {
columnNumber : 0
functionName :
functionName :
lineNumber : 0
scriptId : <scriptId>
url :
url :
}
]
}
@ -272,7 +272,7 @@ Running test: testErrorStackWithRuntimeEnabled
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js
@ -337,7 +337,7 @@ Running test: testErrorStackWithRuntimeEnabled
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js

View File

@ -48,7 +48,7 @@ Running test: testErrorStackTraceLimitWithRuntimeEnabled
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js
@ -209,10 +209,10 @@ Running test: testErrorStackTraceLimitWithRuntimeEnabled
}
[23] : {
columnNumber : 0
functionName :
functionName :
lineNumber : 0
scriptId : <scriptId>
url :
url :
}
]
}
@ -272,7 +272,7 @@ Running test: testErrorStackTraceLimitWithRuntimeEnabled
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js
@ -337,7 +337,7 @@ Running test: testErrorStackTraceLimitWithRuntimeEnabled
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js
@ -439,7 +439,7 @@ Running test: testErrorStackTraceLimitNonNumber
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js
@ -600,10 +600,10 @@ Running test: testErrorStackTraceLimitNonNumber
}
[23] : {
columnNumber : 0
functionName :
functionName :
lineNumber : 0
scriptId : <scriptId>
url :
url :
}
]
}
@ -639,7 +639,7 @@ Running test: testErrorStackTraceLimitDeleted
callFrames : [
[0] : {
columnNumber : 10
functionName :
functionName :
lineNumber : 8
scriptId : <scriptId>
url : test.js
@ -800,10 +800,10 @@ Running test: testErrorStackTraceLimitDeleted
}
[23] : {
columnNumber : 0
functionName :
functionName :
lineNumber : 0
scriptId : <scriptId>
url :
url :
}
]
}

18
test/mjsunit/maglev/00.js Normal file
View File

@ -0,0 +1,18 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(x) {
if (x) return 1;
return 2;
}
%PrepareFunctionForOptimization(f);
assertEquals(1, f(true));
assertEquals(2, f(false));
%OptimizeMaglevOnNextCall(f);
assertEquals(1, f(true));
assertEquals(2, f(false));

20
test/mjsunit/maglev/01.js Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
var xyz = 42;
function f(x) {
if (x) return 1;
return xyz;
}
%PrepareFunctionForOptimization(f);
assertEquals(1, f(true));
assertEquals(42, f(false));
%OptimizeMaglevOnNextCall(f);
assertEquals(1, f(true));
assertEquals(42, f(false));

20
test/mjsunit/maglev/02.js Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(x) {
if (x < 0) return -1;
return 1;
}
%PrepareFunctionForOptimization(f);
assertEquals(-1, f(-2));
assertEquals(1, f(0));
assertEquals(1, f(2));
%OptimizeMaglevOnNextCall(f);
assertEquals(-1, f(-2));
assertEquals(1, f(0));
assertEquals(1, f(2));

21
test/mjsunit/maglev/03.js Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(x) {
var y = 0;
for (var i = 0; i < x; i++) {
y = 1;
}
return y;
}
%PrepareFunctionForOptimization(f);
assertEquals(1, f(true));
assertEquals(0, f(false));
%OptimizeMaglevOnNextCall(f);
assertEquals(1, f(true));
assertEquals(0, f(false));

16
test/mjsunit/maglev/04.js Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(x) {
while (true) {
if (x) return 10;
}
}
%PrepareFunctionForOptimization(f);
assertEquals(10, f(true));
%OptimizeMaglevOnNextCall(f);
assertEquals(10, f(true));

21
test/mjsunit/maglev/05.js Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(i, end) {
do {
do {
i = end;
} while (end);
end = i;
} while (i);
return 10;
}
%PrepareFunctionForOptimization(f);
assertEquals(10, f(false, false));
%OptimizeMaglevOnNextCall(f);
assertEquals(10, f(false, false));

25
test/mjsunit/maglev/06.js Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(i, j) {
var x = 1;
var y = 2;
if (i) {
x = y;
if (j) {
x = 3
}
}
return x;
}
%PrepareFunctionForOptimization(f);
assertEquals(1, f(false, true));
%OptimizeMaglevOnNextCall(f);
assertEquals(1, f(false, false));
assertEquals(2, f(true, false));
assertEquals(3, f(true, true));

19
test/mjsunit/maglev/07.js Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(i) {
var x = 1;
if (i) { x = 2 }
return x;
}
%PrepareFunctionForOptimization(f);
assertEquals(2, f(true));
assertEquals(1, f(false));
%OptimizeMaglevOnNextCall(f);
assertEquals(2, f(true));
assertEquals(1, f(false));

19
test/mjsunit/maglev/08.js Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(i) {
var x = 1;
if (i) {} else { x = 2 }
return x;
}
%PrepareFunctionForOptimization(f);
assertEquals(1, f(true));
assertEquals(2, f(false));
%OptimizeMaglevOnNextCall(f);
assertEquals(1, f(true));
assertEquals(2, f(false));

21
test/mjsunit/maglev/09.js Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(i) {
var x = 1;
if (i) {
if (i) { x = 3 } else {}
} else { x = 2 }
return x;
}
%PrepareFunctionForOptimization(f);
assertEquals(3, f(true));
assertEquals(2, f(false));
%OptimizeMaglevOnNextCall(f);
assertEquals(3, f(true));
assertEquals(2, f(false));

26
test/mjsunit/maglev/10.js Normal file
View File

@ -0,0 +1,26 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
const ys = [0,1,2];
function g() {
%CollectGarbage(42);
return [0,1,2];
}
%NeverOptimizeFunction(g);
const o = { g: g };
function f(o) {
// Using CallProperty since plain calls are still unimplemented.
return o.g();
}
%PrepareFunctionForOptimization(f);
assertEquals(ys, f(o));
%OptimizeMaglevOnNextCall(f);
assertEquals(ys, f(o));

39
test/mjsunit/maglev/11.js Normal file
View File

@ -0,0 +1,39 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(x) {
return x.a
}
function Foo(a) {
this.a = a
}
%PrepareFunctionForOptimization(f);
// Smi
var o1_1 = new Foo(1);
var o1_2 = new Foo(1);
// Transition map to double, o1 is deprecated, o1's map is a deprecation target.
var o2 = new Foo(1.2);
// Transition map to tagged, o1 is still deprecated.
var an_object = {};
var o3 = new Foo(an_object);
assertEquals(1, f(o1_1));
assertEquals(1.2, f(o2));
assertEquals(an_object, f(o3));
// o1_1 got migrated, but o1_2 hasn't yet.
assertTrue(%HaveSameMap(o1_1,o3));
assertFalse(%HaveSameMap(o1_2,o3));
%OptimizeMaglevOnNextCall(f);
// Deprecated map works
assertEquals(1, f(o1_2));
// Non-deprecated map works
assertEquals(an_object, f(o3));

27
test/mjsunit/maglev/12.js Normal file
View File

@ -0,0 +1,27 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function g() {
%CollectGarbage(42);
return 43;
}
%NeverOptimizeFunction(g);
const o = { g: g };
function f(o, x) {
var y = 42;
if (x) y = 43;
// Using CallProperty since plain calls are still unimplemented.
o.g();
return y;
}
%PrepareFunctionForOptimization(f);
assertEquals(43, f(o, true));
%OptimizeMaglevOnNextCall(f);
assertEquals(43, f(o, true));

17
test/mjsunit/maglev/13.js Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(a) {
while(true) {
if(5 < ++a) return a;
}
}
%PrepareFunctionForOptimization(f);
assertEquals(f(0), 6);
%OptimizeMaglevOnNextCall(f);
assertEquals(f(0), 6);

31
test/mjsunit/maglev/14.js Normal file
View File

@ -0,0 +1,31 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
function f(i) {
a:{
b: {
c: {
if (i < 100) {
break c;
} else {
break b;
}
i = 3;
}
i = 4;
break a;
}
i = 5;
}
return i;
}
%PrepareFunctionForOptimization(f);
assertEquals(f(1), 4);
%OptimizeMaglevOnNextCall(f);
assertEquals(f(1), 4);

17
test/mjsunit/maglev/15.js Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax
var xyz = 42;
function f(x) {
return x < x;
}
%PrepareFunctionForOptimization(f);
assertEquals(f(1), false);
%OptimizeMaglevOnNextCall(f);
assertEquals(f(1), false);