[maglev] Arm64 boilerplate

The absolute minimum to compile arm64 with v8_enable_maglev.

Bug: v8:7700
Change-Id: I7e1a0e31397f1677977c416d0ecc68fd6ee35b12
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4055115
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Auto-Submit: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84498}
This commit is contained in:
Victor Gomes 2022-11-25 16:33:38 +01:00 committed by V8 LUCI CQ
parent 7216ab0c08
commit 3a13bc5596
10 changed files with 521 additions and 194 deletions

View File

@ -3600,7 +3600,9 @@ v8_header_set("v8_internal_headers") {
"src/maglev/maglev-vreg-allocator.h",
"src/maglev/maglev.h",
]
if (v8_current_cpu == "x64") {
if (v8_current_cpu == "arm64") {
sources += [ "src/maglev/arm64/maglev-assembler-arm64-inl.h" ]
} else if (v8_current_cpu == "x64") {
sources += [ "src/maglev/x64/maglev-assembler-x64-inl.h" ]
}
}
@ -4780,10 +4782,17 @@ v8_source_set("v8_base_without_compiler") {
"src/maglev/maglev-graph-builder.cc",
"src/maglev/maglev-graph-printer.cc",
"src/maglev/maglev-interpreter-frame-state.cc",
"src/maglev/maglev-ir.cc",
"src/maglev/maglev-regalloc.cc",
"src/maglev/maglev.cc",
]
if (v8_current_cpu == "x64") {
if (v8_current_cpu == "arm64") {
sources += [
"src/maglev/arm64/maglev-assembler-arm64.cc",
"src/maglev/arm64/maglev-code-generator-arm64.cc",
"src/maglev/arm64/maglev-ir-arm64.cc",
]
} else if (v8_current_cpu == "x64") {
sources += [
"src/maglev/x64/maglev-assembler-x64.cc",
"src/maglev/x64/maglev-code-generator-x64.cc",

View File

@ -1796,6 +1796,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
PopSizeRegList(regs, kSRegSizeInBits);
}
// Spill/restore helpers used by Maglev deopt/call sequences: push or pop an
// entire register list. The X-variants cover general-purpose registers.
inline void PushAll(RegList registers) { PushXRegList(registers); }
inline void PopAll(RegList registers) { PopXRegList(registers); }
// NOTE(review): `stack_slot_size` is ignored on arm64 — presumably because
// Q registers are always pushed as full 128-bit slots here; confirm callers
// do not rely on a narrower per-slot size.
inline void PushAll(DoubleRegList registers, int stack_slot_size) {
PushQRegList(registers);
}
inline void PopAll(DoubleRegList registers, int stack_slot_size) {
PopQRegList(registers);
}
// Push the specified register 'count' times.
void PushMultipleTimes(CPURegister src, Register count);

View File

@ -0,0 +1,16 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Arm64 placeholder for the Maglev assembler. Exists only so that arm64
// builds with v8_enable_maglev link; no out-of-line helpers are defined yet.
#include "src/maglev/maglev-assembler.h"
namespace v8 {
namespace internal {
namespace maglev {
// TODO(v8:7700): Implement!
} // namespace maglev
} // namespace internal
} // namespace v8

View File

@ -0,0 +1,36 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-code-generator.h"
#include "src/maglev/maglev-graph.h"
namespace v8 {
namespace internal {
namespace maglev {
// Arm64 stub of the Maglev code generator. The constructor wires up the same
// state as the x64 backend (safepoints, translations, assembler), but code
// emission is not implemented yet.
MaglevCodeGenerator::MaglevCodeGenerator(
LocalIsolate* isolate, MaglevCompilationInfo* compilation_info,
Graph* graph)
: local_isolate_(isolate),
// Safepoint table is sized from the graph's stack-slot partition.
safepoint_table_builder_(compilation_info->zone(),
graph->tagged_stack_slots(),
graph->untagged_stack_slots()),
translation_array_builder_(compilation_info->zone()),
code_gen_state_(compilation_info, &safepoint_table_builder_),
// The assembler needs the main-thread Isolate, not the LocalIsolate.
masm_(isolate->GetMainThreadIsolateUnsafe(), &code_gen_state_),
graph_(graph),
// NOTE(review): deopt_literals_ is constructed from the LocalIsolate's
// underlying Heap (heap()->heap()) — presumably an identity map keyed on
// the main heap; confirm against the x64 backend.
deopt_literals_(isolate->heap()->heap()) {}
// Emits no code yet; the arm64 backend is boilerplate only.
void MaglevCodeGenerator::Assemble() {
// TODO(v8:7700): To implement! :)
}
// Always returns an empty MaybeHandle until code emission exists.
MaybeHandle<Code> MaglevCodeGenerator::Generate(Isolate* isolate) {
// TODO(v8:7700): To implement! :)
return {};
}
} // namespace maglev
} // namespace internal
} // namespace v8

View File

@ -0,0 +1,228 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
// Expands to empty AllocateVreg/GenerateCode definitions for a node type.
// The variadic arguments name the node's member fields; USE(__VA_ARGS__)
// keeps them referenced so -Wunused-private-field stays quiet.
#define UNIMPLEMENTED_NODE(Node, ...) \
void Node ::AllocateVreg(MaglevVregAllocationState* vreg_state) {} \
\
void Node ::GenerateCode(MaglevAssembler* masm, \
const ProcessingState& state) { \
USE(__VA_ARGS__); \
}
// Same as above, plus an empty PrintParams for nodes that declare one.
#define UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Node, ...) \
UNIMPLEMENTED_NODE(Node, __VA_ARGS__) \
void Node::PrintParams(std::ostream& os, \
MaglevGraphLabeller* graph_labeller) const {}
// One stub per Maglev IR node: every node compiles on arm64 but generates no
// code yet. Grouped to mirror the node taxonomy in maglev-ir.h.

// Generic (feedback-driven) binary/unary/compare operations.
UNIMPLEMENTED_NODE(GenericAdd)
UNIMPLEMENTED_NODE(GenericSubtract)
UNIMPLEMENTED_NODE(GenericMultiply)
UNIMPLEMENTED_NODE(GenericDivide)
UNIMPLEMENTED_NODE(GenericModulus)
UNIMPLEMENTED_NODE(GenericExponentiate)
UNIMPLEMENTED_NODE(GenericBitwiseAnd)
UNIMPLEMENTED_NODE(GenericBitwiseOr)
UNIMPLEMENTED_NODE(GenericBitwiseXor)
UNIMPLEMENTED_NODE(GenericShiftLeft)
UNIMPLEMENTED_NODE(GenericShiftRight)
UNIMPLEMENTED_NODE(GenericShiftRightLogical)
UNIMPLEMENTED_NODE(GenericBitwiseNot)
UNIMPLEMENTED_NODE(GenericNegate)
UNIMPLEMENTED_NODE(GenericIncrement)
UNIMPLEMENTED_NODE(GenericDecrement)
UNIMPLEMENTED_NODE(GenericEqual)
UNIMPLEMENTED_NODE(GenericStrictEqual)
UNIMPLEMENTED_NODE(GenericLessThan)
UNIMPLEMENTED_NODE(GenericLessThanOrEqual)
UNIMPLEMENTED_NODE(GenericGreaterThan)
UNIMPLEMENTED_NODE(GenericGreaterThanOrEqual)
// Int32 arithmetic/compare nodes.
UNIMPLEMENTED_NODE(Int32AddWithOverflow)
UNIMPLEMENTED_NODE(Int32SubtractWithOverflow)
UNIMPLEMENTED_NODE(Int32MultiplyWithOverflow)
UNIMPLEMENTED_NODE(Int32DivideWithOverflow)
UNIMPLEMENTED_NODE(Int32ModulusWithOverflow)
UNIMPLEMENTED_NODE(Int32BitwiseAnd)
UNIMPLEMENTED_NODE(Int32BitwiseOr)
UNIMPLEMENTED_NODE(Int32BitwiseXor)
UNIMPLEMENTED_NODE(Int32ShiftLeft)
UNIMPLEMENTED_NODE(Int32ShiftRight)
UNIMPLEMENTED_NODE(Int32ShiftRightLogical)
UNIMPLEMENTED_NODE(Int32BitwiseNot)
UNIMPLEMENTED_NODE(Int32NegateWithOverflow)
UNIMPLEMENTED_NODE(Int32IncrementWithOverflow)
UNIMPLEMENTED_NODE(Int32DecrementWithOverflow)
UNIMPLEMENTED_NODE(Int32Equal)
UNIMPLEMENTED_NODE(Int32StrictEqual)
UNIMPLEMENTED_NODE(Int32LessThan)
UNIMPLEMENTED_NODE(Int32LessThanOrEqual)
UNIMPLEMENTED_NODE(Int32GreaterThan)
UNIMPLEMENTED_NODE(Int32GreaterThanOrEqual)
// Float64 arithmetic/compare nodes.
UNIMPLEMENTED_NODE(Float64Add)
UNIMPLEMENTED_NODE(Float64Subtract)
UNIMPLEMENTED_NODE(Float64Multiply)
UNIMPLEMENTED_NODE(Float64Divide)
UNIMPLEMENTED_NODE(Float64Exponentiate)
UNIMPLEMENTED_NODE(Float64Modulus)
UNIMPLEMENTED_NODE(Float64Negate)
UNIMPLEMENTED_NODE(Float64Equal)
UNIMPLEMENTED_NODE(Float64StrictEqual)
UNIMPLEMENTED_NODE(Float64LessThan)
UNIMPLEMENTED_NODE(Float64LessThanOrEqual)
UNIMPLEMENTED_NODE(Float64GreaterThan)
UNIMPLEMENTED_NODE(Float64GreaterThanOrEqual)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Float64Ieee754Unary)
// Constants.
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Constant)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Float64Constant)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Int32Constant)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(RootConstant)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(SmiConstant)
// Builtin-backed string helpers, calls and constructions.
UNIMPLEMENTED_NODE(BuiltinStringFromCharCode)
UNIMPLEMENTED_NODE(BuiltinStringPrototypeCharCodeAt)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Call, receiver_mode_, target_type_,
feedback_)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CallBuiltin)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CallRuntime)
UNIMPLEMENTED_NODE(CallWithArrayLike)
UNIMPLEMENTED_NODE(CallWithSpread)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CallKnownJSFunction)
UNIMPLEMENTED_NODE(Construct)
UNIMPLEMENTED_NODE(ConstructWithSpread)
UNIMPLEMENTED_NODE(ConvertReceiver, mode_)
UNIMPLEMENTED_NODE(ConvertHoleToUndefined)
// Literal / closure / context creation.
UNIMPLEMENTED_NODE(CreateEmptyArrayLiteral)
UNIMPLEMENTED_NODE(CreateArrayLiteral)
UNIMPLEMENTED_NODE(CreateShallowArrayLiteral)
UNIMPLEMENTED_NODE(CreateObjectLiteral)
UNIMPLEMENTED_NODE(CreateEmptyObjectLiteral)
UNIMPLEMENTED_NODE(CreateShallowObjectLiteral)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CreateFunctionContext)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CreateClosure)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(FastCreateClosure)
UNIMPLEMENTED_NODE(CreateRegExpLiteral)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(DeleteProperty)
// Iteration / generator / misc runtime support.
UNIMPLEMENTED_NODE(ForInPrepare)
UNIMPLEMENTED_NODE(ForInNext)
UNIMPLEMENTED_NODE(GeneratorRestoreRegister)
UNIMPLEMENTED_NODE(GetIterator)
UNIMPLEMENTED_NODE(GetSecondReturnedValue)
UNIMPLEMENTED_NODE(GetTemplateObject)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(InitialValue)
// Loads.
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(LoadTaggedField)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(LoadDoubleField)
UNIMPLEMENTED_NODE(LoadTaggedElement)
UNIMPLEMENTED_NODE(LoadSignedIntDataViewElement, type_)
UNIMPLEMENTED_NODE(LoadDoubleDataViewElement)
UNIMPLEMENTED_NODE(LoadSignedIntTypedArrayElement, elements_kind_)
UNIMPLEMENTED_NODE(LoadUnsignedIntTypedArrayElement, elements_kind_)
UNIMPLEMENTED_NODE(LoadDoubleTypedArrayElement, elements_kind_)
UNIMPLEMENTED_NODE(LoadDoubleElement)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(LoadGlobal)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(LoadNamedGeneric)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(LoadNamedFromSuperGeneric)
// Generic stores / keyed access.
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(SetNamedGeneric)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(DefineNamedOwnGeneric)
UNIMPLEMENTED_NODE(StoreInArrayLiteralGeneric)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(StoreGlobal)
UNIMPLEMENTED_NODE(GetKeyedGeneric)
UNIMPLEMENTED_NODE(SetKeyedGeneric)
UNIMPLEMENTED_NODE(DefineKeyedOwnGeneric)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Phi)
// Phi additionally has a post-process hook; stubbed like the rest.
void Phi::AllocateVregInPostProcess(MaglevVregAllocationState*) {}
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(RegisterInput)
// Tagging / untagging and numeric conversions.
UNIMPLEMENTED_NODE(CheckedSmiTagInt32)
UNIMPLEMENTED_NODE(CheckedSmiTagUint32)
UNIMPLEMENTED_NODE(UnsafeSmiTag)
UNIMPLEMENTED_NODE(CheckedSmiUntag)
UNIMPLEMENTED_NODE(UnsafeSmiUntag)
UNIMPLEMENTED_NODE(CheckedInternalizedString, check_type_)
UNIMPLEMENTED_NODE(CheckedObjectToIndex)
UNIMPLEMENTED_NODE(CheckedTruncateNumberToInt32)
UNIMPLEMENTED_NODE(CheckedInt32ToUint32)
UNIMPLEMENTED_NODE(CheckedUint32ToInt32)
UNIMPLEMENTED_NODE(ChangeInt32ToFloat64)
UNIMPLEMENTED_NODE(ChangeUint32ToFloat64)
UNIMPLEMENTED_NODE(CheckedTruncateFloat64ToInt32)
UNIMPLEMENTED_NODE(CheckedTruncateFloat64ToUint32)
UNIMPLEMENTED_NODE(TruncateUint32ToInt32)
UNIMPLEMENTED_NODE(TruncateFloat64ToInt32)
UNIMPLEMENTED_NODE(Int32ToNumber)
UNIMPLEMENTED_NODE(Uint32ToNumber)
UNIMPLEMENTED_NODE(Float64Box)
UNIMPLEMENTED_NODE(HoleyFloat64Box)
UNIMPLEMENTED_NODE(CheckedFloat64Unbox)
// Logic / string / test nodes.
UNIMPLEMENTED_NODE(LogicalNot)
UNIMPLEMENTED_NODE(SetPendingMessage)
UNIMPLEMENTED_NODE(StringAt)
UNIMPLEMENTED_NODE(StringLength)
UNIMPLEMENTED_NODE(ToBoolean)
UNIMPLEMENTED_NODE(ToBooleanLogicalNot)
UNIMPLEMENTED_NODE(TaggedEqual)
UNIMPLEMENTED_NODE(TaggedNotEqual)
UNIMPLEMENTED_NODE(TestInstanceOf)
UNIMPLEMENTED_NODE(TestUndetectable)
UNIMPLEMENTED_NODE(TestTypeOf, literal_)
UNIMPLEMENTED_NODE(ToName)
UNIMPLEMENTED_NODE(ToNumberOrNumeric)
UNIMPLEMENTED_NODE(ToObject)
UNIMPLEMENTED_NODE(ToString)
// Register-allocator moves and assertions.
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(ConstantGapMove)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(GapMove)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(AssertInt32, condition_, reason_)
// Checks (deopt guards).
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckDynamicValue)
UNIMPLEMENTED_NODE(CheckInt32IsSmi)
UNIMPLEMENTED_NODE(CheckUint32IsSmi)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckHeapObject)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckInt32Condition, condition_, reason_)
UNIMPLEMENTED_NODE(CheckJSArrayBounds)
UNIMPLEMENTED_NODE(CheckJSDataViewBounds, element_type_)
UNIMPLEMENTED_NODE(CheckJSObjectElementsBounds)
UNIMPLEMENTED_NODE(CheckJSTypedArrayBounds, elements_kind_)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckMaps, check_type_)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckMapsWithMigration, check_type_)
UNIMPLEMENTED_NODE(CheckNumber)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckSmi)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckString, check_type_)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckSymbol, check_type_)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckValue)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(CheckInstanceType, check_type_)
UNIMPLEMENTED_NODE(DebugBreak)
UNIMPLEMENTED_NODE(GeneratorStore)
UNIMPLEMENTED_NODE(JumpLoopPrologue, loop_depth_, unit_)
// Stores.
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(StoreMap)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(StoreDoubleField)
UNIMPLEMENTED_NODE(StoreSignedIntDataViewElement, type_)
UNIMPLEMENTED_NODE(StoreDoubleDataViewElement)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(StoreTaggedFieldNoWriteBarrier)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(StoreTaggedFieldWithWriteBarrier)
// Interrupt budget and throw helpers.
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(IncreaseInterruptBudget)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(ReduceInterruptBudget)
UNIMPLEMENTED_NODE(ThrowReferenceErrorIfHole)
UNIMPLEMENTED_NODE(ThrowSuperNotCalledIfHole)
UNIMPLEMENTED_NODE(ThrowSuperAlreadyCalledIfNotHole)
UNIMPLEMENTED_NODE(ThrowIfNotSuperConstructor)
// Control-flow nodes.
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(BranchIfRootConstant)
UNIMPLEMENTED_NODE(BranchIfToBooleanTrue)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(BranchIfReferenceCompare, operation_)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(BranchIfInt32Compare, operation_)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(BranchIfFloat64Compare, operation_)
UNIMPLEMENTED_NODE(BranchIfUndefinedOrNull)
UNIMPLEMENTED_NODE(BranchIfJSReceiver)
UNIMPLEMENTED_NODE(Switch)
UNIMPLEMENTED_NODE(Jump)
UNIMPLEMENTED_NODE(JumpLoop)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(JumpToInlined)
UNIMPLEMENTED_NODE(JumpFromInlined)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Abort)
UNIMPLEMENTED_NODE(Return)
UNIMPLEMENTED_NODE_WITH_PRINT_PARAMS(Deopt)
} // namespace maglev
} // namespace internal
} // namespace v8

View File

@ -41,17 +41,9 @@ class MaglevAssembler : public MacroAssembler {
: MacroAssembler(isolate, CodeObjectRequired::kNo),
code_gen_state_(code_gen_state) {}
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
return MemOperand(rbp, GetFramePointerOffsetForStackSlot(operand));
}
inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}
inline MemOperand ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand);
inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand);
inline MemOperand ToMemOperand(const ValueLocation& location);
inline int GetFramePointerOffsetForStackSlot(
const compiler::AllocatedOperand& operand) {

201
src/maglev/maglev-ir.cc Normal file
View File

@ -0,0 +1,201 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-ir.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
#include "src/maglev/maglev-graph-labeller.h"
namespace v8 {
namespace internal {
namespace maglev {
// Maps an Opcode to its printable name via a table generated from
// NODE_BASE_LIST, so the table stays in sync with the node list by
// construction.
const char* OpcodeToString(Opcode opcode) {
#define DEF_NAME(Name) #Name,
static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
#undef DEF_NAME
return names[static_cast<int>(opcode)];
}
namespace {
// ---
// Print
// ---
// Prints a node's inputs as " [in0, in1, ...]"; prints nothing for
// input-less nodes.
void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {
if (!node->has_inputs()) return;
os << " [";
for (int i = 0; i < node->input_count(); i++) {
if (i != 0) os << ", ";
graph_labeller->PrintInput(os, node->input(i));
}
os << "]";
}
// Overload set selected statically by PrintImpl: plain nodes have no result
// to print; value nodes print their result operand and, when assigned, their
// live range.
void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {}
void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const ValueNode* node) {
os << "" << node->result().operand();
if (node->has_valid_live_range()) {
os << ", live range: [" << node->live_range().start << "-"
<< node->live_range().end << "]";
}
}
// Overload set selected statically by PrintImpl: non-control nodes print no
// targets; unconditional jumps print their single target block id, branches
// print the true then the false target.
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const UnconditionalControlNode* node) {
os << " b" << graph_labeller->BlockId(node->target());
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const BranchControlNode* node) {
os << " b" << graph_labeller->BlockId(node->if_true()) << " b"
<< graph_labeller->BlockId(node->if_false());
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const Switch* node) {
for (int i = 0; i < node->size(); i++) {
const BasicBlockRef& target = node->Cast<Switch>()->targets()[i];
os << " b" << graph_labeller->BlockId(target.block_ptr());
}
if (node->Cast<Switch>()->has_fallthrough()) {
BasicBlock* fallthrough_target = node->Cast<Switch>()->fallthrough();
os << " b" << graph_labeller->BlockId(fallthrough_target);
}
}
// RAII guard: printing may read heap objects, which requires an unparked
// local heap. If the current (or main-thread) LocalHeap is parked, unpark it
// for the lifetime of this object; otherwise do nothing.
class MaybeUnparkForPrint {
public:
MaybeUnparkForPrint() {
LocalHeap* local_heap = LocalHeap::Current();
if (!local_heap) {
// Not on a background thread: fall back to the main-thread heap.
local_heap = Isolate::Current()->main_thread_local_heap();
}
DCHECK_NOT_NULL(local_heap);
if (local_heap->IsParked()) {
scope_.emplace(local_heap);
}
}
private:
// Engaged only when we actually had to unpark; destructor re-parks.
base::Optional<UnparkedScope> scope_;
};
// Shared printing driver: emits "<opcode><params> [inputs]<result><targets>".
// Statically dispatches the PrintResult/PrintTargets overloads based on the
// concrete node type NodeT.
template <typename NodeT>
void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeT* node, bool skip_targets) {
MaybeUnparkForPrint unpark;
os << node->opcode();
node->PrintParams(os, graph_labeller);
PrintInputs(os, graph_labeller, node);
PrintResult(os, graph_labeller, node);
// Callers printing a node inline (not as a block terminator) skip targets.
if (!skip_targets) {
PrintTargets(os, graph_labeller, node);
}
}
// Walks the deopt frame chain from the top frame to the outermost parent and
// sums the number of input locations each frame needs.
size_t GetInputLocationsArraySize(const DeoptFrame& top_frame) {
size_t size = 0;
const DeoptFrame* frame = &top_frame;
do {
switch (frame->type()) {
case DeoptFrame::FrameType::kInterpretedFrame:
size += frame->as_interpreted().frame_state()->size(
frame->as_interpreted().unit());
break;
case DeoptFrame::FrameType::kBuiltinContinuationFrame:
// NOTE(review): the +1 is presumably for the context slot in addition
// to the parameters — confirm against the translation builder.
size += frame->as_builtin_continuation().parameters().size() + 1;
break;
}
frame = frame->parent();
} while (frame != nullptr);
return size;
}
} // namespace
// Truthiness of a root constant: exactly false, null, undefined and the
// empty string coerce to false; every other root coerces to true.
bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const {
  return index_ != RootIndex::kFalseValue && index_ != RootIndex::kNullValue &&
         index_ != RootIndex::kUndefinedValue &&
         index_ != RootIndex::kempty_string;
}
// Allocates and default-initialises one InputLocation per value captured by
// the deopt frame chain.
DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame,
                     compiler::FeedbackSource feedback_to_update)
    : top_frame_(top_frame),
      feedback_to_update_(feedback_to_update),
      input_locations_(zone->NewArray<InputLocation>(
          GetInputLocationsArraySize(top_frame))) {
  // Hoisted out of the loop: the original re-ran GetInputLocationsArraySize()
  // — a full walk of the frame chain — in the loop condition on every
  // iteration.
  const size_t count = GetInputLocationsArraySize(top_frame);
  // Initialise InputLocations so that they correctly don't have a next use id.
  for (size_t i = 0; i < count; ++i) {
    new (&input_locations_[i]) InputLocation();
  }
}
// Returns whether `reg` is one of the interpreter registers the lazily
// deopted call writes its result into.
bool LazyDeoptInfo::IsResultRegister(interpreter::Register reg) const {
  // Common case: the result occupies a single register.
  if (V8_LIKELY(result_size_ == 1)) return reg == result_location_;
  // Otherwise it is a register pair starting at result_location_.
  DCHECK_EQ(result_size_, 2);
  const interpreter::Register second(result_location_.index() + 1);
  return reg == result_location_ || reg == second;
}
// Dispatches to PrintImpl<ConcreteNode> based on the runtime opcode; the
// switch is generated from NODE_BASE_LIST so it covers every node kind.
void NodeBase::Print(std::ostream& os, MaglevGraphLabeller* graph_labeller,
bool skip_targets) const {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return PrintImpl(os, graph_labeller, this->Cast<Name>(), skip_targets);
NODE_BASE_LIST(V)
#undef V
}
// Every valid opcode returns inside the switch.
UNREACHABLE();
}
// Debugging convenience: prints the node to stdout with a throwaway
// labeller (so block/node ids are local to this call).
void NodeBase::Print() const {
MaglevGraphLabeller labeller;
Print(std::cout, &labeller);
std::cout << std::endl;
}
// Transitions this node from the "last use" tracking state to the
// spill-or-hint state with an empty (unallocated) operand. Constants use
// SetConstantLocation() instead.
void ValueNode::SetNoSpillOrHint() {
DCHECK_EQ(state_, kLastUse);
DCHECK(!IsConstantNode(opcode()));
#ifdef DEBUG
// state_ only exists in debug builds to verify the state machine.
state_ = kSpillOrHint;
#endif // DEBUG
spill_or_hint_ = compiler::InstructionOperand();
}
// For constant nodes: record the constant's virtual register as its
// (rematerialisable) location instead of a stack slot.
void ValueNode::SetConstantLocation() {
DCHECK(IsConstantNode(opcode()));
#ifdef DEBUG
state_ = kSpillOrHint;
#endif // DEBUG
spill_or_hint_ = compiler::ConstantOperand(
compiler::UnallocatedOperand::cast(result().operand())
.virtual_register());
}
} // namespace maglev
} // namespace internal
} // namespace v8

View File

@ -25,7 +25,9 @@
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
#ifdef V8_TARGET_ARCH_X64
#ifdef V8_TARGET_ARCH_ARM64
#include "src/codegen/arm64/register-arm64.h"
#elif V8_TARGET_ARCH_X64
#include "src/codegen/x64/register-x64.h"
#else
#error "Maglev does not supported this architecture."

View File

@ -74,6 +74,20 @@ Register MaglevAssembler::FromAnyToRegister(const Input& input,
}
}
// Converts an allocated stack-slot operand into an rbp-relative (frame
// pointer) memory operand. x64-specific: uses rbp directly.
inline MemOperand MaglevAssembler::GetStackSlot(
const compiler::AllocatedOperand& operand) {
return MemOperand(rbp, GetFramePointerOffsetForStackSlot(operand));
}
// Convenience overloads that funnel any instruction operand / value location
// into GetStackSlot().
inline MemOperand MaglevAssembler::ToMemOperand(
const compiler::InstructionOperand& operand) {
return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}
inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
inline void MaglevAssembler::DefineLazyDeoptPoint(LazyDeoptInfo* info) {
info->set_deopting_call_return_pc(pc_offset_for_safepoint());
code_gen_state()->PushLazyDeopt(info);

View File

@ -36,13 +36,6 @@ namespace v8 {
namespace internal {
namespace maglev {
const char* OpcodeToString(Opcode opcode) {
#define DEF_NAME(Name) #Name,
static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
#undef DEF_NAME
return names[static_cast<int>(opcode)];
}
#define __ masm->
namespace {
@ -135,151 +128,8 @@ RegList GetGeneralRegistersUsedAsInputs(const EagerDeoptInfo* deopt_info) {
// when non-empty.
#define DCHECK_REGLIST_EMPTY(...) DCHECK_EQ((__VA_ARGS__), RegList{})
// ---
// Print
// ---
void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {
if (!node->has_inputs()) return;
os << " [";
for (int i = 0; i < node->input_count(); i++) {
if (i != 0) os << ", ";
graph_labeller->PrintInput(os, node->input(i));
}
os << "]";
}
void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {}
void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const ValueNode* node) {
os << "" << node->result().operand();
if (node->has_valid_live_range()) {
os << ", live range: [" << node->live_range().start << "-"
<< node->live_range().end << "]";
}
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeBase* node) {}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const UnconditionalControlNode* node) {
os << " b" << graph_labeller->BlockId(node->target());
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const BranchControlNode* node) {
os << " b" << graph_labeller->BlockId(node->if_true()) << " b"
<< graph_labeller->BlockId(node->if_false());
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const Switch* node) {
for (int i = 0; i < node->size(); i++) {
const BasicBlockRef& target = node->Cast<Switch>()->targets()[i];
os << " b" << graph_labeller->BlockId(target.block_ptr());
}
if (node->Cast<Switch>()->has_fallthrough()) {
BasicBlock* fallthrough_target = node->Cast<Switch>()->fallthrough();
os << " b" << graph_labeller->BlockId(fallthrough_target);
}
}
class MaybeUnparkForPrint {
public:
MaybeUnparkForPrint() {
LocalHeap* local_heap = LocalHeap::Current();
if (!local_heap) {
local_heap = Isolate::Current()->main_thread_local_heap();
}
DCHECK_NOT_NULL(local_heap);
if (local_heap->IsParked()) {
scope_.emplace(local_heap);
}
}
private:
base::Optional<UnparkedScope> scope_;
};
template <typename NodeT>
void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeT* node, bool skip_targets) {
MaybeUnparkForPrint unpark;
os << node->opcode();
node->PrintParams(os, graph_labeller);
PrintInputs(os, graph_labeller, node);
PrintResult(os, graph_labeller, node);
if (!skip_targets) {
PrintTargets(os, graph_labeller, node);
}
}
} // namespace
void NodeBase::Print(std::ostream& os, MaglevGraphLabeller* graph_labeller,
bool skip_targets) const {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return PrintImpl(os, graph_labeller, this->Cast<Name>(), skip_targets);
NODE_BASE_LIST(V)
#undef V
}
UNREACHABLE();
}
void NodeBase::Print() const {
MaglevGraphLabeller labeller;
Print(std::cout, &labeller);
std::cout << std::endl;
}
namespace {
size_t GetInputLocationsArraySize(const DeoptFrame& top_frame) {
size_t size = 0;
const DeoptFrame* frame = &top_frame;
do {
switch (frame->type()) {
case DeoptFrame::FrameType::kInterpretedFrame:
size += frame->as_interpreted().frame_state()->size(
frame->as_interpreted().unit());
break;
case DeoptFrame::FrameType::kBuiltinContinuationFrame:
size += frame->as_builtin_continuation().parameters().size() + 1;
break;
}
frame = frame->parent();
} while (frame != nullptr);
return size;
}
} // namespace
DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame,
compiler::FeedbackSource feedback_to_update)
: top_frame_(top_frame),
feedback_to_update_(feedback_to_update),
input_locations_(zone->NewArray<InputLocation>(
GetInputLocationsArraySize(top_frame))) {
// Initialise InputLocations so that they correctly don't have a next use id.
for (size_t i = 0; i < GetInputLocationsArraySize(top_frame); ++i) {
new (&input_locations_[i]) InputLocation();
}
}
bool LazyDeoptInfo::IsResultRegister(interpreter::Register reg) const {
if (V8_LIKELY(result_size_ == 1)) {
return reg == result_location_;
}
DCHECK_EQ(result_size_, 2);
return reg == result_location_ ||
reg == interpreter::Register(result_location_.index() + 1);
}
// ---
// Nodes
// ---
@ -350,25 +200,6 @@ Handle<Object> ValueNode::Reify(LocalIsolate* isolate) {
}
}
void ValueNode::SetNoSpillOrHint() {
DCHECK_EQ(state_, kLastUse);
DCHECK(!IsConstantNode(opcode()));
#ifdef DEBUG
state_ = kSpillOrHint;
#endif // DEBUG
spill_or_hint_ = compiler::InstructionOperand();
}
void ValueNode::SetConstantLocation() {
DCHECK(IsConstantNode(opcode()));
#ifdef DEBUG
state_ = kSpillOrHint;
#endif // DEBUG
spill_or_hint_ = compiler::ConstantOperand(
compiler::UnallocatedOperand::cast(result().operand())
.virtual_register());
}
// Smi constants get a constant vreg — no general register is reserved.
void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
@ -777,17 +608,6 @@ void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
}
// Nothing to emit: root constants are materialised on demand via
// DoLoadToRegister.
void RootConstant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}
bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const {
switch (index_) {
case RootIndex::kFalseValue:
case RootIndex::kNullValue:
case RootIndex::kUndefinedValue:
case RootIndex::kempty_string:
return false;
default:
return true;
}
}
// Materialises the root constant by loading it from the roots table.
void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ LoadRoot(reg, index());
}