[maglev] Add a (mostly empty for now) MaglevAssembler

Bug: v8:7700
Change-Id: Idf4cd2544e7ee3912809cbf95cee4823be36d1dd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3875905
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Auto-Submit: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83021}
This commit is contained in:
Jakob Linke 2022-09-07 11:18:47 +02:00 committed by V8 LUCI CQ
parent 49abe45e41
commit e5eab3d67a
8 changed files with 675 additions and 864 deletions

View File

@@ -3615,6 +3615,8 @@ v8_header_set("v8_internal_headers") {
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-assembler-inl.h",
"src/maglev/maglev-assembler.h",
"src/maglev/maglev-basic-block.h",
"src/maglev/maglev-code-gen-state.h",
"src/maglev/maglev-code-generator.h",

View File

@@ -0,0 +1,39 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#include "src/codegen/macro-assembler-inl.h"
#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-code-gen-state.h"
namespace v8 {
namespace internal {
namespace maglev {
// Records a lazy-deopt point at the current assembler position: remembers
// the return pc of the deopting call on the deopt info, queues the info on
// the code-gen state, and defines a safepoint here.
inline void MaglevAssembler::DefineLazyDeoptPoint(LazyDeoptInfo* info) {
  const int return_pc = pc_offset_for_safepoint();
  info->deopting_call_return_pc = return_pc;
  code_gen_state()->PushLazyDeopt(info);
  safepoint_table_builder()->DefineSafepoint(this);
}
// If `node` has an associated exception handler, records the current pc as
// the handler's dispatch offset and queues the node on the code-gen state;
// otherwise does nothing.
inline void MaglevAssembler::DefineExceptionHandlerPoint(NodeBase* node) {
  ExceptionHandlerInfo* handler_info = node->exception_handler_info();
  if (handler_info->HasExceptionHandler()) {
    handler_info->pc_offset = pc_offset_for_safepoint();
    code_gen_state()->PushHandlerInfo(node);
  }
}
// Convenience wrapper: defines the exception-handler point (if any) first,
// then the lazy-deopt point for `node` at the current assembler position.
inline void MaglevAssembler::DefineExceptionHandlerAndLazyDeoptPoint(
    NodeBase* node) {
  DefineExceptionHandlerPoint(node);
  LazyDeoptInfo* deopt_info = node->lazy_deopt_info();
  DefineLazyDeoptPoint(deopt_info);
}
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_

View File

@@ -0,0 +1,71 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_H_
#include "src/codegen/macro-assembler.h"
#include "src/maglev/maglev-code-gen-state.h"
namespace v8 {
namespace internal {
namespace maglev {
// Maglev's assembler: a MacroAssembler extended with helpers that know about
// the Maglev frame layout and the surrounding code-generation state.
class MaglevAssembler : public MacroAssembler {
 public:
  explicit MaglevAssembler(MaglevCodeGenState* code_gen_state)
      : MacroAssembler(code_gen_state->isolate(), CodeObjectRequired::kNo),
        code_gen_state_(code_gen_state) {}

  // Returns the frame-pointer-relative memory operand for `operand`'s slot.
  inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
    const int fp_offset = GetFramePointerOffsetForStackSlot(operand);
    return MemOperand(rbp, fp_offset);
  }

  inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
    return GetStackSlot(compiler::AllocatedOperand::cast(operand));
  }

  inline MemOperand ToMemOperand(const ValueLocation& location) {
    return ToMemOperand(location.operand());
  }

  // Computes the fp-relative offset of an allocated stack slot. Slots whose
  // representation is not kTagged are indexed after all tagged slots, hence
  // the index adjustment below.
  inline int GetFramePointerOffsetForStackSlot(
      const compiler::AllocatedOperand& operand) {
    int slot = operand.index();
    const bool is_tagged =
        operand.representation() == MachineRepresentation::kTagged;
    if (!is_tagged) slot += code_gen_state()->tagged_slots();
    return GetFramePointerOffsetForStackSlot(slot);
  }

  // Defined in maglev-assembler-inl.h.
  inline void DefineLazyDeoptPoint(LazyDeoptInfo* info);
  inline void DefineExceptionHandlerPoint(NodeBase* node);
  inline void DefineExceptionHandlerAndLazyDeoptPoint(NodeBase* node);

  compiler::NativeContextRef native_context() const {
    return code_gen_state()->broker()->target_native_context();
  }

  MaglevCodeGenState* code_gen_state() const { return code_gen_state_; }
  MaglevSafepointTableBuilder* safepoint_table_builder() const {
    return code_gen_state()->safepoint_table_builder();
  }
  MaglevCompilationInfo* compilation_info() const {
    return code_gen_state()->compilation_info();
  }

 private:
  // Expression stack slots grow downwards from kExpressionsOffset.
  inline constexpr int GetFramePointerOffsetForStackSlot(int index) {
    return StandardFrameConstants::kExpressionsOffset -
           index * kSystemPointerSize;
  }

  MaglevCodeGenState* const code_gen_state_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_ASSEMBLER_H_

View File

@ -8,7 +8,6 @@
#include "src/codegen/assembler.h"
#include "src/codegen/label.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/maglev-safepoint-table.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
@ -21,11 +20,11 @@ namespace internal {
namespace maglev {
class InterpreterFrameState;
class MaglevAssembler;
class DeferredCodeInfo {
public:
virtual void Generate(MaglevCodeGenState* code_gen_state,
Label* return_label) = 0;
virtual void Generate(MaglevAssembler* masm, Label* return_label) = 0;
Label deferred_code_label;
Label return_label;
};
@ -35,8 +34,7 @@ class MaglevCodeGenState {
MaglevCodeGenState(MaglevCompilationInfo* compilation_info,
MaglevSafepointTableBuilder* safepoint_table_builder)
: compilation_info_(compilation_info),
safepoint_table_builder_(safepoint_table_builder),
masm_(isolate(), CodeObjectRequired::kNo) {}
safepoint_table_builder_(safepoint_table_builder) {}
void set_tagged_slots(int slots) { tagged_slots_ = slots; }
void set_untagged_slots(int slots) { untagged_slots_ = slots; }
@ -55,13 +53,9 @@ class MaglevCodeGenState {
const std::vector<LazyDeoptInfo*>& lazy_deopts() const {
return lazy_deopts_;
}
inline void DefineLazyDeoptPoint(LazyDeoptInfo* info);
void PushHandlerInfo(NodeBase* node) { handlers_.push_back(node); }
const std::vector<NodeBase*>& handlers() const { return handlers_; }
inline void DefineExceptionHandlerPoint(NodeBase* node);
inline void DefineExceptionHandlerAndLazyDeoptPoint(NodeBase* node);
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
@ -71,49 +65,17 @@ class MaglevCodeGenState {
MaglevGraphLabeller* graph_labeller() const {
return compilation_info_->graph_labeller();
}
MacroAssembler* masm() { return &masm_; }
int stack_slots() const { return untagged_slots_ + tagged_slots_; }
int tagged_slots() const { return tagged_slots_; }
MaglevSafepointTableBuilder* safepoint_table_builder() const {
return safepoint_table_builder_;
}
MaglevCompilationInfo* compilation_info() const { return compilation_info_; }
inline int GetFramePointerOffsetForStackSlot(
const compiler::AllocatedOperand& operand) {
int index = operand.index();
if (operand.representation() != MachineRepresentation::kTagged) {
index += tagged_slots_;
}
return GetFramePointerOffsetForStackSlot(index);
}
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
return MemOperand(rbp, GetFramePointerOffsetForStackSlot(operand));
}
inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}
inline MemOperand ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
inline MemOperand TopOfStack() {
return MemOperand(rbp,
GetFramePointerOffsetForStackSlot(stack_slots() - 1));
}
private:
inline constexpr int GetFramePointerOffsetForStackSlot(int index) {
return StandardFrameConstants::kExpressionsOffset -
index * kSystemPointerSize;
}
MaglevCompilationInfo* const compilation_info_;
MaglevSafepointTableBuilder* const safepoint_table_builder_;
MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_;
std::vector<EagerDeoptInfo*> eager_deopts_;
std::vector<LazyDeoptInfo*> lazy_deopts_;
@ -158,25 +120,6 @@ inline DoubleRegister ToDoubleRegister(const ValueLocation& location) {
return ToDoubleRegister(location.operand());
}
inline void MaglevCodeGenState::DefineLazyDeoptPoint(LazyDeoptInfo* info) {
info->deopting_call_return_pc = masm()->pc_offset_for_safepoint();
PushLazyDeopt(info);
safepoint_table_builder()->DefineSafepoint(masm());
}
inline void MaglevCodeGenState::DefineExceptionHandlerPoint(NodeBase* node) {
ExceptionHandlerInfo* info = node->exception_handler_info();
if (!info->HasExceptionHandler()) return;
info->pc_offset = masm()->pc_offset_for_safepoint();
PushHandlerInfo(node);
}
inline void MaglevCodeGenState::DefineExceptionHandlerAndLazyDeoptPoint(
NodeBase* node) {
DefineExceptionHandlerPoint(node);
DefineLazyDeoptPoint(node->lazy_deopt_info());
}
} // namespace maglev
} // namespace internal
} // namespace v8

View File

@ -19,6 +19,7 @@
#include "src/deoptimizer/translation-array.h"
#include "src/execution/frame-constants.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-assembler-inl.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
@ -93,17 +94,15 @@ class ParallelMoveResolver {
RegisterTHelper<RegisterT>::kAllocatableRegisters;
public:
explicit ParallelMoveResolver(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
explicit ParallelMoveResolver(MaglevAssembler* masm) : masm_(masm) {}
void RecordMove(ValueNode* source_node, compiler::InstructionOperand source,
compiler::AllocatedOperand target) {
if (target.IsRegister()) {
RecordMoveToRegister(source_node, source, ToRegisterT<RegisterT>(target));
} else {
RecordMoveToStackSlot(
source_node, source,
code_gen_state_->GetFramePointerOffsetForStackSlot(target));
RecordMoveToStackSlot(source_node, source,
masm_->GetFramePointerOffsetForStackSlot(target));
}
}
@ -118,7 +117,7 @@ class ParallelMoveResolver {
ValueNode* materializing_register_move =
materializing_register_moves_[reg.code()];
if (materializing_register_move) {
materializing_register_move->LoadToRegister(code_gen_state_, reg);
materializing_register_move->LoadToRegister(masm_, reg);
}
}
// Emit stack moves until the move set is empty -- each EmitMoveChain will
@ -128,7 +127,7 @@ class ParallelMoveResolver {
StartEmitMoveChain(moves_from_stack_slot_.begin()->first);
}
for (auto [stack_slot, node] : materializing_stack_slot_moves_) {
node->LoadToRegister(code_gen_state_, kScratchRegT);
node->LoadToRegister(masm_, kScratchRegT);
EmitStackMove(stack_slot, kScratchRegT);
}
}
@ -219,7 +218,7 @@ class ParallelMoveResolver {
moves_from_register_[source_reg.code()].registers.set(target_reg);
}
} else if (source.IsAnyStackSlot()) {
uint32_t source_slot = code_gen_state_->GetFramePointerOffsetForStackSlot(
uint32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
compiler::AllocatedOperand::cast(source));
moves_from_stack_slot_[source_slot].registers.set(target_reg);
} else {
@ -240,7 +239,7 @@ class ParallelMoveResolver {
moves_from_register_[source_reg.code()].stack_slots.push_back(
target_slot);
} else if (source.IsAnyStackSlot()) {
uint32_t source_slot = code_gen_state_->GetFramePointerOffsetForStackSlot(
uint32_t source_slot = masm_->GetFramePointerOffsetForStackSlot(
compiler::AllocatedOperand::cast(source));
if (source_slot != target_slot) {
moves_from_stack_slot_[source_slot].stack_slots.push_back(target_slot);
@ -405,9 +404,9 @@ class ParallelMoveResolver {
__ movq(MemOperand(rbp, stack_slot), kScratchRegister);
}
MacroAssembler* masm() { return code_gen_state_->masm(); }
MacroAssembler* masm() const { return masm_; }
MaglevCodeGenState* code_gen_state_;
MaglevAssembler* const masm_;
// Keep moves to/from registers and stack slots separate -- there are a fixed
// number of registers but an infinite number of stack slots, so the register
@ -433,8 +432,8 @@ class ParallelMoveResolver {
class ExceptionHandlerTrampolineBuilder {
public:
ExceptionHandlerTrampolineBuilder(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
explicit ExceptionHandlerTrampolineBuilder(MaglevAssembler* masm)
: masm_(masm) {}
void EmitTrampolineFor(NodeBase* node) {
DCHECK(node->properties().can_throw());
@ -460,13 +459,13 @@ class ExceptionHandlerTrampolineBuilder {
}
private:
MaglevCodeGenState* code_gen_state_;
MaglevAssembler* const masm_;
using Move = std::pair<const ValueLocation&, ValueNode*>;
base::SmallVector<Move, 16> direct_moves_;
base::SmallVector<Move, 16> materialisation_moves_;
bool save_accumulator_ = false;
MacroAssembler* masm() { return code_gen_state_->masm(); }
MacroAssembler* masm() const { return masm_; }
void ClearState() {
direct_moves_.clear();
@ -603,12 +602,12 @@ class ExceptionHandlerTrampolineBuilder {
MemOperand ToMemOperand(ValueNode* node) {
DCHECK(node->allocation().IsAnyStackSlot());
return code_gen_state_->ToMemOperand(node->allocation());
return masm_->ToMemOperand(node->allocation());
}
MemOperand ToMemOperand(const ValueLocation& location) {
DCHECK(location.operand().IsStackSlot());
return code_gen_state_->ToMemOperand(location.operand());
return masm_->ToMemOperand(location.operand());
}
template <typename Operand>
@ -624,9 +623,9 @@ class ExceptionHandlerTrampolineBuilder {
void EmitConstantLoad(const ValueLocation& dst, ValueNode* value) {
DCHECK(value->allocation().IsConstant());
if (dst.operand().IsRegister()) {
value->LoadToRegister(code_gen_state_, dst.AssignedGeneralRegister());
value->LoadToRegister(masm_, dst.AssignedGeneralRegister());
} else {
value->LoadToRegister(code_gen_state_, kScratchRegister);
value->LoadToRegister(masm_, kScratchRegister);
__ movq(ToMemOperand(dst), kScratchRegister);
}
}
@ -634,8 +633,8 @@ class ExceptionHandlerTrampolineBuilder {
class MaglevCodeGeneratingNodeProcessor {
public:
explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
explicit MaglevCodeGeneratingNodeProcessor(MaglevAssembler* masm)
: masm_(masm) {}
void PreProcessGraph(MaglevCompilationInfo*, Graph* graph) {
if (FLAG_maglev_break_on_entry) {
@ -686,8 +685,8 @@ class MaglevCodeGeneratingNodeProcessor {
__ Push(kJSFunctionRegister); // Callee's JS function.
__ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
code_gen_state_->set_untagged_slots(graph->untagged_stack_slots());
code_gen_state_->set_tagged_slots(graph->tagged_stack_slots());
code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
{
ASM_CODE_COMMENT_STRING(masm(), " Stack/interrupt check");
@ -699,7 +698,7 @@ class MaglevCodeGeneratingNodeProcessor {
__ Move(kScratchRegister, rsp);
// TODO(leszeks): Include a max call argument size here.
__ subq(kScratchRegister,
Immediate(code_gen_state_->stack_slots() * kSystemPointerSize));
Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
__ cmpq(kScratchRegister,
__ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
@ -758,7 +757,7 @@ class MaglevCodeGeneratingNodeProcessor {
__ PushAll(RegisterInput::kAllowedRegisters);
// Push the frame size
__ Push(Immediate(
Smi::FromInt(code_gen_state_->stack_slots() * kSystemPointerSize)));
Smi::FromInt(code_gen_state()->stack_slots() * kSystemPointerSize)));
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
__ PopAll(RegisterInput::kAllowedRegisters);
__ jmp(&deferred_call_stack_guard_return_);
@ -787,7 +786,7 @@ class MaglevCodeGeneratingNodeProcessor {
__ movq(kScratchRegister, rbp);
__ subq(kScratchRegister, rsp);
__ cmpq(kScratchRegister,
Immediate(code_gen_state_->stack_slots() * kSystemPointerSize +
Immediate(code_gen_state()->stack_slots() * kSystemPointerSize +
StandardFrameConstants::kFixedFrameSizeFromFp));
__ Assert(equal, AbortReason::kStackAccessBelowStackPointer);
}
@ -798,7 +797,7 @@ class MaglevCodeGeneratingNodeProcessor {
state);
}
node->GenerateCode(code_gen_state_, state);
node->GenerateCode(masm(), state);
if (std::is_base_of<ValueNode, NodeT>::value) {
ValueNode* value_node = node->template Cast<ValueNode>();
@ -809,10 +808,10 @@ class MaglevCodeGeneratingNodeProcessor {
if (!source.IsAnyStackSlot()) {
if (FLAG_code_comments) __ RecordComment("-- Spill:");
if (source.IsRegister()) {
__ movq(code_gen_state_->GetStackSlot(value_node->spill_slot()),
__ movq(masm()->GetStackSlot(value_node->spill_slot()),
ToRegister(source));
} else {
__ Movsd(code_gen_state_->GetStackSlot(value_node->spill_slot()),
__ Movsd(masm()->GetStackSlot(value_node->spill_slot()),
ToDoubleRegister(source));
}
} else {
@ -836,8 +835,8 @@ class MaglevCodeGeneratingNodeProcessor {
// TODO(leszeks): Move these to fields, to allow their data structure
// allocations to be reused. Will need some sort of state resetting.
ParallelMoveResolver<Register> register_moves(code_gen_state_);
ParallelMoveResolver<DoubleRegister> double_register_moves(code_gen_state_);
ParallelMoveResolver<Register> register_moves(masm_);
ParallelMoveResolver<DoubleRegister> double_register_moves(masm_);
// Remember what registers were assigned to by a Phi, to avoid clobbering
// them with RegisterMoves.
@ -922,17 +921,20 @@ class MaglevCodeGeneratingNodeProcessor {
double_register_moves.EmitMoves();
}
Isolate* isolate() const { return code_gen_state_->isolate(); }
MacroAssembler* masm() const { return code_gen_state_->masm(); }
Isolate* isolate() const { return masm_->isolate(); }
MaglevAssembler* masm() const { return masm_; }
MaglevCodeGenState* code_gen_state() const {
return masm()->code_gen_state();
}
MaglevGraphLabeller* graph_labeller() const {
return code_gen_state_->graph_labeller();
return code_gen_state()->graph_labeller();
}
MaglevSafepointTableBuilder* safepoint_table_builder() const {
return code_gen_state_->safepoint_table_builder();
return code_gen_state()->safepoint_table_builder();
}
private:
MaglevCodeGenState* code_gen_state_;
MaglevAssembler* const masm_;
Label deferred_call_stack_guard_;
Label deferred_call_stack_guard_return_;
};
@ -952,7 +954,8 @@ class MaglevCodeGeneratorImpl final {
graph->tagged_stack_slots(),
graph->untagged_stack_slots()),
code_gen_state_(compilation_info, safepoint_table_builder()),
processor_(compilation_info, &code_gen_state_),
masm_(&code_gen_state_),
processor_(compilation_info, &masm_),
graph_(graph) {}
MaybeHandle<Code> Generate() {
@ -972,7 +975,7 @@ class MaglevCodeGeneratorImpl final {
for (DeferredCodeInfo* deferred_code : code_gen_state_.deferred_code()) {
__ RecordComment("-- Deferred block");
__ bind(&deferred_code->deferred_code_label);
deferred_code->Generate(&code_gen_state_, &deferred_code->return_label);
deferred_code->Generate(masm(), &deferred_code->return_label);
__ Trap();
}
}
@ -1013,7 +1016,7 @@ class MaglevCodeGeneratorImpl final {
void EmitExceptionHandlersTrampolines() {
if (code_gen_state_.handlers().size() == 0) return;
ExceptionHandlerTrampolineBuilder builder(&code_gen_state_);
ExceptionHandlerTrampolineBuilder builder(masm());
__ RecordComment("-- Exception handlers trampolines");
for (NodeBase* node : code_gen_state_.handlers()) {
builder.EmitTrampolineFor(node);
@ -1151,13 +1154,14 @@ class MaglevCodeGeneratorImpl final {
Isolate* isolate() const {
return code_gen_state_.compilation_info()->isolate();
}
MacroAssembler* masm() { return code_gen_state_.masm(); }
MaglevAssembler* masm() { return &masm_; }
MaglevSafepointTableBuilder* safepoint_table_builder() {
return &safepoint_table_builder_;
}
MaglevSafepointTableBuilder safepoint_table_builder_;
MaglevCodeGenState code_gen_state_;
MaglevAssembler masm_;
GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
Graph* const graph_;

View File

@ -13,7 +13,6 @@
#include "src/base/threaded-list.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff