[maglev] Move arch-independent code generator

Bug: v8:7700
Change-Id: I83e13bb6c19716e14576a957cf94b81371417808
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4063691
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84566}
Author: Victor Gomes <victorgomes@chromium.org>, 2022-11-30 11:03:35 +01:00 (committed by V8 LUCI CQ)
Parent: 0c18a3a577
Commit: 2900117db9
12 changed files with 635 additions and 381 deletions
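
For orientation before the per-file hunks: the change routes the shared Maglev code generator through a portable MaglevAssembler surface and selects the architecture-specific inline header at compile time. A minimal sketch of that dispatch, using the include paths and error directive added by the diff below:

// Compile-time architecture selection, as added to the shared Maglev files
// (the arch-independent code generator and maglev-ir.cc) in this change.
#ifdef V8_TARGET_ARCH_ARM64
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/maglev/x64/maglev-assembler-x64-inl.h"
#else
#error "Maglev does not support this architecture."
#endif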

View File

@@ -4795,6 +4795,7 @@ v8_source_set("v8_base_without_compiler") {
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-code-generator.cc",
"src/maglev/maglev-compilation-info.cc",
"src/maglev/maglev-compilation-unit.cc",
"src/maglev/maglev-compiler.cc",
@@ -4809,13 +4810,11 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "arm64") {
sources += [
"src/maglev/arm64/maglev-assembler-arm64.cc",
"src/maglev/arm64/maglev-code-generator-arm64.cc",
"src/maglev/arm64/maglev-ir-arm64.cc",
]
} else if (v8_current_cpu == "x64") {
sources += [
"src/maglev/x64/maglev-assembler-x64.cc",
"src/maglev/x64/maglev-code-generator-x64.cc",
"src/maglev/x64/maglev-ir-x64.cc",
]
}

View File

@@ -374,7 +374,7 @@ void Assembler::AllocateAndInstallRequestedHeapNumbers(Isolate* isolate) {
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
SafepointTableBuilderBase* safepoint_table_builder,
int handler_table_offset) {
// As a crutch to avoid having to add manual Align calls wherever we use a
// raw workflow to create Code objects (mostly in tests), add another Align

View File

@@ -190,9 +190,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
static constexpr SafepointTableBuilderBase* kNoSafepointTable = nullptr;
void GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
SafepointTableBuilderBase* safepoint_table_builder,
int handler_table_offset);
// Convenience wrapper for code without safepoint or handler tables.

View File

@@ -1798,10 +1798,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void PushAll(RegList registers) { PushXRegList(registers); }
inline void PopAll(RegList registers) { PopXRegList(registers); }
inline void PushAll(DoubleRegList registers, int stack_slot_size) {
inline void PushAll(DoubleRegList registers,
int stack_slot_size = kDoubleSize) {
PushQRegList(registers);
}
inline void PopAll(DoubleRegList registers, int stack_slot_size) {
inline void PopAll(DoubleRegList registers,
int stack_slot_size = kDoubleSize) {
PopQRegList(registers);
}

View File

@@ -0,0 +1,111 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_
#define V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_
#include "src/codegen/macro-assembler-inl.h"
#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-code-gen-state.h"
namespace v8 {
namespace internal {
namespace maglev {
constexpr Register kScratchRegister = x16;
constexpr DoubleRegister kScratchDoubleReg = d30;
inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot slot) {
// TODO(v8:7700): Implement!
UNREACHABLE();
return MemOperand();
}
inline MemOperand MaglevAssembler::GetStackSlot(
const compiler::AllocatedOperand& operand) {
// TODO(v8:7700): Implement!
UNREACHABLE();
return MemOperand();
}
inline MemOperand MaglevAssembler::ToMemOperand(
const compiler::InstructionOperand& operand) {
return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}
inline MemOperand MaglevAssembler::ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
inline void MaglevAssembler::Move(StackSlot dst, Register src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, StackSlot src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(MemOperand dst, Register src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(MemOperand dst, DoubleRegister src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, MemOperand src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(DoubleRegister dst, MemOperand src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, Smi src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Register src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Immediate i) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::AssertStackSizeCorrect() {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
inline void MaglevAssembler::MaterialiseValueNode(Register dst,
ValueNode* value) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_ARM64_MAGLEV_ASSEMBLER_ARM64_INL_H_

View File

@@ -9,7 +9,19 @@ namespace v8 {
namespace internal {
namespace maglev {
void MaglevAssembler::Prologue(Graph* graph,
Label* deferred_flags_need_processing,
Label* deferred_call_stack_guard,
Label* deferred_call_stack_guard_return) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
void MaglevAssembler::DeferredPrologue(
Graph* graph, Label* deferred_flags_need_processing,
Label* deferred_call_stack_guard, Label* deferred_call_stack_guard_return) {
// TODO(v8:7700): Implement!
UNREACHABLE();
}
} // namespace maglev
} // namespace internal

View File

@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
namespace maglev {
class Graph;
class MaglevAssembler;
// Label allowed to be passed to deferred code.
@@ -35,6 +36,11 @@ class ZoneLabelRef {
explicit ZoneLabelRef(Label* label) : label_(label) {}
};
// The slot index is the offset from the frame pointer.
struct StackSlot {
uint32_t index;
};
class MaglevAssembler : public MacroAssembler {
public:
explicit MaglevAssembler(Isolate* isolate, MaglevCodeGenState* code_gen_state)
@@ -134,6 +140,34 @@ class MaglevAssembler : public MacroAssembler {
inline void EmitEagerDeoptIf(Condition cond, DeoptimizeReason reason,
NodeT* node);
inline void MaterialiseValueNode(Register dst, ValueNode* value);
inline MemOperand StackSlotOperand(StackSlot slot);
inline void Move(StackSlot dst, Register src);
inline void Move(StackSlot dst, DoubleRegister src);
inline void Move(Register dst, StackSlot src);
inline void Move(DoubleRegister dst, StackSlot src);
inline void Move(MemOperand dst, Register src);
inline void Move(MemOperand dst, DoubleRegister src);
inline void Move(Register dst, MemOperand src);
inline void Move(DoubleRegister dst, MemOperand src);
inline void Move(DoubleRegister dst, DoubleRegister src);
inline void Move(Register dst, Smi src);
inline void Move(Register dst, Register src);
inline void Move(Register dst, TaggedIndex i);
inline void Move(Register dst, Immediate i);
inline void Move(DoubleRegister dst, double n);
inline void Move(Register dst, Handle<HeapObject> obj);
void Prologue(Graph* graph, Label* deferred_flags_need_processing,
Label* deferred_call_stack_guard,
Label* deferred_call_stack_guard_return);
void DeferredPrologue(Graph* graph, Label* deferred_flags_need_processing,
Label* deferred_call_stack_guard,
Label* deferred_call_stack_guard_return);
inline void AssertStackSizeCorrect();
compiler::NativeContextRef native_context() const {
return code_gen_state()->broker()->target_native_context();
}
@@ -197,6 +231,9 @@ class SaveRegisterStateForCall {
RegisterSnapshot snapshot_;
};
ZoneLabelRef::ZoneLabelRef(MaglevAssembler* masm)
: ZoneLabelRef(masm->compilation_info()->zone()) {}
} // namespace maglev
} // namespace internal
} // namespace v8
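
The StackSlot wrapper and the Move overloads above are the portable surface the arch-independent code generator now targets: a slot is just a frame-pointer offset, and each architecture's *-inl.h header decides how it becomes a real memory operand (MemOperand(rbp, index) on x64; still UNREACHABLE stubs on arm64). A small usage sketch follows; the helper function and the offset are illustrative, not part of the diff:

#include <cstdint>

#include "src/maglev/maglev-assembler.h"  // declares MaglevAssembler and StackSlot

namespace v8::internal::maglev {

// Hypothetical helper: spill a register to a frame slot and reload it using
// only the arch-independent API introduced here.
inline void SpillAndReload(MaglevAssembler* masm, Register reg,
                           uint32_t frame_offset) {
  StackSlot slot{frame_offset};  // offset from the frame pointer
  masm->Move(slot, reg);         // register -> stack slot
  masm->Move(reg, slot);         // stack slot -> register
}

}  // namespace v8::internal::maglev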

View File

@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-code-generator.h"
#include <algorithm>
#include "src/base/hashmap.h"
@@ -19,7 +21,6 @@
#include "src/execution/frame-constants.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-code-generator.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
@@ -27,10 +28,17 @@
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
#include "src/maglev/x64/maglev-assembler-x64-inl.h"
#include "src/objects/code-inl.h"
#include "src/utils/identity-map.h"
#ifdef V8_TARGET_ARCH_ARM64
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/maglev/x64/maglev-assembler-x64-inl.h"
#else
#error "Maglev does not supported this architecture."
#endif
namespace v8 {
namespace internal {
namespace maglev {
@@ -128,7 +136,7 @@ class ParallelMoveResolver {
}
for (auto [stack_slot, node] : materializing_stack_slot_moves_) {
node->LoadToRegister(masm_, kScratchRegT);
EmitStackMove(stack_slot, kScratchRegT);
__ Move(StackSlot{stack_slot}, kScratchRegT);
}
}
@@ -306,7 +314,7 @@ class ParallelMoveResolver {
__ RecordComment("-- * Cycle");
DCHECK(!scratch_has_cycle_start_);
if constexpr (std::is_same_v<ChainStartT, uint32_t>) {
EmitStackMove(kScratchRegT, chain_start);
__ Move(kScratchRegT, StackSlot{chain_start});
} else {
__ Move(kScratchRegT, chain_start);
}
@@ -352,7 +360,7 @@ class ParallelMoveResolver {
for (uint32_t target_slot : targets.stack_slots) {
DCHECK_EQ(moves_from_stack_slot_.find(target_slot),
moves_from_stack_slot_.end());
EmitStackMove(target_slot, source_reg);
__ Move(StackSlot{target_slot}, source_reg);
}
}
@@ -380,40 +388,16 @@ class ParallelMoveResolver {
// Now emit moves from that cached register instead of from the stack slot.
DCHECK(register_with_slot_value.is_valid());
DCHECK(moves_from_register_[register_with_slot_value.code()].is_empty());
EmitStackMove(register_with_slot_value, source_slot);
__ Move(register_with_slot_value, StackSlot{source_slot});
EmitMovesFromSource(register_with_slot_value, std::move(targets));
}
// The slot index used for representing slots in the move graph is the offset
// from the frame pointer. These helpers help translate this into an actual
// machine move.
void EmitStackMove(uint32_t target_slot, Register source_reg) {
__ movq(MemOperand(rbp, target_slot), source_reg);
}
void EmitStackMove(uint32_t target_slot, DoubleRegister source_reg) {
__ Movsd(MemOperand(rbp, target_slot), source_reg);
}
void EmitStackMove(Register target_reg, uint32_t source_slot) {
__ movq(target_reg, MemOperand(rbp, source_slot));
}
void EmitStackMove(DoubleRegister target_reg, uint32_t source_slot) {
__ Movsd(target_reg, MemOperand(rbp, source_slot));
}
void Push(Register reg) { __ Push(reg); }
void Push(DoubleRegister reg) { __ PushAll({reg}); }
void Push(uint32_t stack_slot) {
__ movq(kScratchRegister, MemOperand(rbp, stack_slot));
__ movq(MemOperand(rsp, -1), kScratchRegister);
}
void Pop(Register reg) { __ Pop(reg); }
void Pop(DoubleRegister reg) { __ PopAll({reg}); }
void Pop(uint32_t stack_slot) {
__ movq(kScratchRegister, MemOperand(rsp, -1));
__ movq(MemOperand(rbp, stack_slot), kScratchRegister);
}
MacroAssembler* masm() const { return masm_; }
MaglevAssembler* masm() const { return masm_; }
MaglevAssembler* const masm_;
@@ -426,6 +410,7 @@ class ParallelMoveResolver {
std::array<GapMoveTargets, RegisterT::kNumRegisters> moves_from_register_ =
{};
// TODO(victorgomes): Use MaglevAssembler::StackSlot instead of uint32_t.
// moves_from_stack_slot_[source] = target.
std::unordered_map<uint32_t, GapMoveTargets> moves_from_stack_slot_;
@@ -509,7 +494,7 @@ class ExceptionHandlerTrampolineBuilder {
__ RecordComment("-- Exception handler trampoline END");
}
MacroAssembler* masm() const { return masm_; }
MaglevAssembler* masm() const { return masm_; }
void RecordMoves(const MaglevCompilationUnit& unit, BasicBlock* catch_block,
const CompactInterpreterFrameState* register_frame,
@@ -578,7 +563,7 @@ class ExceptionHandlerTrampolineBuilder {
// We consider constants after all other operations, since constants
// don't need to call NewHeapNumber.
if (IsConstantNode(move.source->opcode())) continue;
MaterialiseNonConstantTo(move.source, kReturnRegister0);
__ MaterialiseValueNode(kReturnRegister0, move.source);
__ Push(kReturnRegister0);
}
}
@@ -593,249 +578,42 @@ class ExceptionHandlerTrampolineBuilder {
? target.AssignedGeneralRegister()
: kScratchRegister;
if (IsConstantNode(move.source->opcode())) {
MaterialiseConstantTo(move.source, target_reg);
__ MaterialiseValueNode(target_reg, move.source);
} else {
__ Pop(target_reg);
}
if (target_reg == kScratchRegister) {
__ movq(masm_->ToMemOperand(target.operand()), kScratchRegister);
__ Move(masm_->ToMemOperand(target.operand()), kScratchRegister);
}
}
if (save_accumulator) __ Pop(kReturnRegister0);
}
void MaterialiseNonConstantTo(ValueNode* value, Register dst) const {
DCHECK(!value->allocation().IsConstant());
using D = NewHeapNumberDescriptor;
switch (value->properties().value_representation()) {
case ValueRepresentation::kInt32: {
Label done;
__ movl(dst, ToMemOperand(value));
__ addl(dst, dst);
__ j(no_overflow, &done, Label::kNear);
// If we overflow, instead of bailing out (deopting), we change
// representation to a HeapNumber.
__ Cvtlsi2sd(D::GetDoubleRegisterParameter(D::kValue),
ToMemOperand(value));
__ CallBuiltin(Builtin::kNewHeapNumber);
__ Move(dst, kReturnRegister0);
__ bind(&done);
break;
}
case ValueRepresentation::kUint32: {
Label done, tag_smi;
__ movl(dst, ToMemOperand(value));
// Unsigned comparison against Smi::kMaxValue.
__ cmpl(dst, Immediate(Smi::kMaxValue));
// If we don't fit in a Smi, instead of bailing out (deopting), we
// change representation to a HeapNumber.
__ j(below_equal, &tag_smi, Label::kNear);
// The value was loaded with movl, so it is zero-extended to 64 bits.
// Therefore, we can do an unsigned 32-bit conversion to double with a
// 64-bit signed conversion (Cvt_q_si2sd instead of Cvt_l_si2sd).
__ Cvtqsi2sd(D::GetDoubleRegisterParameter(D::kValue),
ToMemOperand(value));
__ CallBuiltin(Builtin::kNewHeapNumber);
__ Move(dst, kReturnRegister0);
__ jmp(&done, Label::kNear);
__ bind(&tag_smi);
__ SmiTag(dst);
__ bind(&done);
break;
}
case ValueRepresentation::kFloat64:
__ Movsd(D::GetDoubleRegisterParameter(D::kValue), ToMemOperand(value));
__ CallBuiltin(Builtin::kNewHeapNumber);
__ Move(dst, kReturnRegister0);
break;
case ValueRepresentation::kTagged:
UNREACHABLE();
}
}
void MaterialiseConstantTo(ValueNode* value, Register dst) const {
DCHECK(value->allocation().IsConstant());
switch (value->opcode()) {
case Opcode::kInt32Constant: {
int32_t int_value = value->Cast<Int32Constant>()->value();
if (Smi::IsValid(int_value)) {
__ Move(dst, Smi::FromInt(int_value));
} else {
__ movq_heap_number(dst, int_value);
}
break;
}
case Opcode::kFloat64Constant: {
double double_value = value->Cast<Float64Constant>()->value();
__ movq_heap_number(dst, double_value);
break;
}
default:
UNREACHABLE();
}
}
MemOperand ToMemOperand(ValueNode* node) const {
DCHECK(node->allocation().IsAnyStackSlot());
return masm_->ToMemOperand(node->allocation());
}
MemOperand ToMemOperand(const ValueLocation& location) const {
DCHECK(location.operand().IsStackSlot());
return masm_->ToMemOperand(location.operand());
}
MaglevAssembler* const masm_;
};
class MaglevCodeGeneratingNodeProcessor {
public:
explicit MaglevCodeGeneratingNodeProcessor(MaglevAssembler* masm)
: masm_(masm) {}
: masm_(masm),
deferred_call_stack_guard_(masm),
deferred_call_stack_guard_return_(masm),
deferred_flags_need_processing_(masm) {}
void PreProcessGraph(Graph* graph) {
code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
if (v8_flags.maglev_break_on_entry) {
__ int3();
__ Prologue(graph, *deferred_flags_need_processing_,
*deferred_call_stack_guard_,
*deferred_call_stack_guard_return_);
}
if (v8_flags.maglev_ool_prologue) {
// Call the out-of-line prologue (with parameters passed on the stack).
__ Push(Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
__ Push(Immediate(code_gen_state()->tagged_slots() * kSystemPointerSize));
__ CallBuiltin(Builtin::kMaglevOutOfLinePrologue);
} else {
__ BailoutIfDeoptimized(rbx);
// Tiering support.
// TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
// per Maglev code object on x64).
{
// Scratch registers. Don't clobber regs related to the calling
// convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
// with deferred flags code.
Register flags = rcx;
Register feedback_vector = r9;
// Load the feedback vector.
__ LoadTaggedPointerField(
feedback_vector,
FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ AssertFeedbackVector(feedback_vector);
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::MAGLEV,
&deferred_flags_need_processing_);
}
__ EnterFrame(StackFrame::MAGLEV);
// Save arguments in frame.
// TODO(leszeks): Consider eliding this frame if we don't make any calls
// that could clobber these registers.
__ Push(kContextRegister);
__ Push(kJSFunctionRegister); // Callee's JS function.
__ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
{
ASM_CODE_COMMENT_STRING(masm(), " Stack/interrupt check");
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real
// stack limit or tighter. By ensuring we have space until that limit
// after building the frame we can quickly precheck both at once.
__ Move(kScratchRegister, rsp);
// TODO(leszeks): Include a max call argument size here.
__ subq(kScratchRegister, Immediate(code_gen_state()->stack_slots() *
kSystemPointerSize));
__ cmpq(kScratchRegister,
__ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
__ j(below, &deferred_call_stack_guard_);
__ bind(&deferred_call_stack_guard_return_);
}
// Initialize stack slots.
if (graph->tagged_stack_slots() > 0) {
ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
// TODO(leszeks): Consider filling with xmm + movdqa instead.
__ Move(rax, Immediate(0));
// Magic value. Experimentally, an unroll size of 8 doesn't seem any
// worse than fully unrolled pushes.
const int kLoopUnrollSize = 8;
int tagged_slots = graph->tagged_stack_slots();
if (tagged_slots < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill
// completely.
for (int i = 0; i < tagged_slots; ++i) {
__ pushq(rax);
}
} else {
// Extract the first few slots to round to the unroll size.
int first_slots = tagged_slots % kLoopUnrollSize;
for (int i = 0; i < first_slots; ++i) {
__ pushq(rax);
}
__ Move(rbx, Immediate(tagged_slots / kLoopUnrollSize));
// We enter the loop unconditionally, so make sure we need to loop at
// least once.
DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
Label loop;
__ bind(&loop);
for (int i = 0; i < kLoopUnrollSize; ++i) {
__ pushq(rax);
}
__ decl(rbx);
__ j(greater, &loop);
}
}
if (graph->untagged_stack_slots() > 0) {
// Extend rsp by the size of the remaining untagged part of the frame,
// no need to initialise these.
__ subq(rsp,
Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
}
}
}
void PostProcessGraph(Graph*) {
__ int3();
if (!v8_flags.maglev_ool_prologue) {
__ bind(&deferred_call_stack_guard_);
{
ASM_CODE_COMMENT_STRING(masm(), "Stack/interrupt call");
// Save any registers that can be referenced by RegisterInput.
// TODO(leszeks): Only push those that are used by the graph.
__ PushAll(RegisterInput::kAllowedRegisters);
// Push the frame size
__ Push(Immediate(Smi::FromInt(code_gen_state()->stack_slots() *
kSystemPointerSize)));
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
__ PopAll(RegisterInput::kAllowedRegisters);
__ jmp(&deferred_call_stack_guard_return_);
}
__ bind(&deferred_flags_need_processing_);
{
ASM_CODE_COMMENT_STRING(masm(), "Optimized marker check");
// See PreProcessGraph.
Register flags = rcx;
Register feedback_vector = r9;
// TODO(leszeks): This could definitely be a builtin that we tail-call.
__ OptimizeCodeOrTailCallOptimizedCodeSlot(
flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
void PostProcessGraph(Graph* graph) {
__ Trap();
}
}
// TODO(victorgomes): Use normal deferred code mechanism in Prologue
// instead.
__ DeferredPrologue(graph, *deferred_flags_need_processing_,
*deferred_call_stack_guard_,
*deferred_call_stack_guard_return_);
}
void PreProcessBasicBlock(BasicBlock* block) {
@@ -857,14 +635,7 @@ class MaglevCodeGeneratingNodeProcessor {
__ RecordComment(ss.str());
}
if (v8_flags.debug_code) {
__ movq(kScratchRegister, rbp);
__ subq(kScratchRegister, rsp);
__ cmpq(kScratchRegister,
Immediate(code_gen_state()->stack_slots() * kSystemPointerSize +
StandardFrameConstants::kFixedFrameSizeFromFp));
__ Assert(equal, AbortReason::kStackAccessBelowStackPointer);
}
__ AssertStackSizeCorrect();
// Emit Phi moves before visiting the control node.
if (std::is_base_of<UnconditionalControlNode, NodeT>::value) {
@@ -883,10 +654,10 @@ class MaglevCodeGeneratingNodeProcessor {
if (!source.IsAnyStackSlot()) {
if (v8_flags.code_comments) __ RecordComment("-- Spill:");
if (source.IsRegister()) {
__ movq(masm()->GetStackSlot(value_node->spill_slot()),
__ Move(masm()->GetStackSlot(value_node->spill_slot()),
ToRegister(source));
} else {
__ Movsd(masm()->GetStackSlot(value_node->spill_slot()),
__ Move(masm()->GetStackSlot(value_node->spill_slot()),
ToDoubleRegister(source));
}
} else {
@@ -1007,9 +778,9 @@ class MaglevCodeGeneratingNodeProcessor {
private:
MaglevAssembler* const masm_;
Label deferred_call_stack_guard_;
Label deferred_call_stack_guard_return_;
Label deferred_flags_need_processing_;
ZoneLabelRef deferred_call_stack_guard_;
ZoneLabelRef deferred_call_stack_guard_return_;
ZoneLabelRef deferred_flags_need_processing_;
};
class SafepointingNodeProcessor {
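
Worth noting from the hunks above: the node processor's deferred prologue labels change from plain Label members to ZoneLabelRef, so the underlying labels are zone-allocated and stay valid until DeferredPrologue binds them after the main code stream. A condensed sketch of how the pieces fit together; the class name is a stand-in for MaglevCodeGeneratingNodeProcessor, not the real thing:

// Stand-in mirroring the ZoneLabelRef members and the new Prologue /
// DeferredPrologue entry points from the diff above.
class NodeProcessorSketch {
 public:
  explicit NodeProcessorSketch(MaglevAssembler* masm)
      : masm_(masm),
        deferred_call_stack_guard_(masm),
        deferred_call_stack_guard_return_(masm),
        deferred_flags_need_processing_(masm) {}

  void PreProcessGraph(Graph* graph) {
    // Frame setup, tiering check and stack check now live in the
    // arch-specific Prologue.
    masm_->Prologue(graph, *deferred_flags_need_processing_,
                    *deferred_call_stack_guard_,
                    *deferred_call_stack_guard_return_);
  }

  void PostProcessGraph(Graph* graph) {
    masm_->Trap();  // as in PostProcessGraph above
    // Binds the three deferred labels and emits their out-of-line code.
    masm_->DeferredPrologue(graph, *deferred_flags_need_processing_,
                            *deferred_call_stack_guard_,
                            *deferred_call_stack_guard_return_);
  }

 private:
  MaglevAssembler* const masm_;
  ZoneLabelRef deferred_call_stack_guard_;
  ZoneLabelRef deferred_call_stack_guard_return_;
  ZoneLabelRef deferred_flags_need_processing_;
};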

View File

@@ -9,10 +9,20 @@
#include "src/heap/parked-scope.h"
#include "src/maglev/maglev-graph-labeller.h"
#ifdef V8_TARGET_ARCH_ARM64
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/maglev/x64/maglev-assembler-x64-inl.h"
#else
#error "Maglev does not supported this architecture."
#endif
namespace v8 {
namespace internal {
namespace maglev {
#define __ masm->
const char* OpcodeToString(Opcode opcode) {
#define DEF_NAME(Name) #Name,
static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
@@ -338,6 +348,126 @@ void CallRuntime::VerifyInputs(MaglevGraphLabeller* graph_labeller) const {
}
}
// ---
// Reify constants
// ---
Handle<Object> ValueNode::Reify(LocalIsolate* isolate) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return this->Cast<Name>()->DoReify(isolate);
CONSTANT_VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
Handle<Object> SmiConstant::DoReify(LocalIsolate* isolate) {
return handle(value_, isolate);
}
Handle<Object> Int32Constant::DoReify(LocalIsolate* isolate) {
return isolate->factory()->NewNumber<AllocationType::kOld>(value());
}
Handle<Object> Float64Constant::DoReify(LocalIsolate* isolate) {
return isolate->factory()->NewNumber<AllocationType::kOld>(value_);
}
Handle<Object> Constant::DoReify(LocalIsolate* isolate) {
return object_.object();
}
Handle<Object> RootConstant::DoReify(LocalIsolate* isolate) {
return isolate->root_handle(index());
}
// ---
// Load node to registers
// ---
namespace {
template <typename NodeT>
void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm, Register reg) {
if constexpr (NodeT::kProperties.value_representation() !=
ValueRepresentation::kFloat64) {
return node->DoLoadToRegister(masm, reg);
} else {
UNREACHABLE();
}
}
template <typename NodeT>
void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm,
DoubleRegister reg) {
if constexpr (NodeT::kProperties.value_representation() ==
ValueRepresentation::kFloat64) {
return node->DoLoadToRegister(masm, reg);
} else {
UNREACHABLE();
}
}
} // namespace
void ValueNode::LoadToRegister(MaglevAssembler* masm, Register reg) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return LoadToRegisterHelper(this->Cast<Name>(), masm, reg);
VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void ValueNode::LoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return LoadToRegisterHelper(this->Cast<Name>(), masm, reg);
VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void ValueNode::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
DCHECK(is_spilled());
DCHECK(!use_double_register());
__ Move(reg,
masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
}
void ValueNode::DoLoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
DCHECK(is_spilled());
DCHECK(use_double_register());
__ Move(reg,
masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
}
void SmiConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ Move(reg, Immediate(value()));
}
void Int32Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ Move(reg, Immediate(value()));
}
void Float64Constant::DoLoadToRegister(MaglevAssembler* masm,
DoubleRegister reg) {
__ Move(reg, value());
}
void Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ Move(reg, object_.object());
}
void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ LoadRoot(reg, index());
}
// ---
// Print params
// ---
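
The LoadToRegisterHelper pair above leans on if constexpr: for each node type, only the overload whose register kind matches the node's statically known value representation expands into a real DoLoadToRegister call, and the mismatching call is discarded at compile time. A stripped-down, self-contained illustration of the same pattern; every type below is invented for the example:

#include <cstdlib>

enum class Rep { kTagged, kFloat64 };
struct GpReg {};      // stands in for Register
struct DoubleReg {};  // stands in for DoubleRegister

// A tagged node only knows how to load into a general-purpose register...
struct TaggedNodeSketch {
  static constexpr Rep kRepresentation = Rep::kTagged;
  void DoLoad(GpReg) {}
};
// ...and a float64 node only into a double register.
struct Float64NodeSketch {
  static constexpr Rep kRepresentation = Rep::kFloat64;
  void DoLoad(DoubleReg) {}
};

template <typename NodeT>
void LoadTo(NodeT* node, GpReg reg) {
  if constexpr (NodeT::kRepresentation != Rep::kFloat64) {
    node->DoLoad(reg);
  } else {
    // Mismatched combination: the DoLoad call above is discarded, so this
    // still compiles even though NodeT has no matching overload.
    std::abort();  // plays the role of UNREACHABLE()
  }
}

template <typename NodeT>
void LoadTo(NodeT* node, DoubleReg reg) {
  if constexpr (NodeT::kRepresentation == Rep::kFloat64) {
    node->DoLoad(reg);
  } else {
    std::abort();
  }
}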

View File

@@ -9,6 +9,7 @@
#include <type_traits>
#include <utility>
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/maglev/maglev-assembler.h"
#include "src/maglev/maglev-basic-block.h"
@@ -18,9 +19,6 @@ namespace v8 {
namespace internal {
namespace maglev {
ZoneLabelRef::ZoneLabelRef(MaglevAssembler* masm)
: ZoneLabelRef(masm->compilation_info()->zone()) {}
void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true,
BasicBlock* if_false, BasicBlock* next_block) {
// We don't have any branch probability information, so try to jump
@@ -173,6 +171,150 @@ inline void MaglevAssembler::ReverseByteOrder(Register value, int size) {
}
}
inline MemOperand MaglevAssembler::StackSlotOperand(StackSlot stack_slot) {
return MemOperand(rbp, stack_slot.index);
}
inline void MaglevAssembler::Move(StackSlot dst, Register src) {
movq(StackSlotOperand(dst), src);
}
inline void MaglevAssembler::Move(StackSlot dst, DoubleRegister src) {
Movsd(StackSlotOperand(dst), src);
}
inline void MaglevAssembler::Move(Register dst, StackSlot src) {
movq(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(DoubleRegister dst, StackSlot src) {
Movsd(dst, StackSlotOperand(src));
}
inline void MaglevAssembler::Move(MemOperand dst, Register src) {
movq(dst, src);
}
inline void MaglevAssembler::Move(MemOperand dst, DoubleRegister src) {
Movsd(dst, src);
}
inline void MaglevAssembler::Move(Register dst, TaggedIndex i) {
MacroAssembler::Move(dst, i);
}
inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Smi src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, MemOperand src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(DoubleRegister dst, MemOperand src) {
Movsd(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Register src) {
MacroAssembler::Move(dst, src);
}
inline void MaglevAssembler::Move(Register dst, Immediate i) {
MacroAssembler::Move(dst, i);
}
inline void MaglevAssembler::Move(DoubleRegister dst, double n) {
MacroAssembler::Move(dst, n);
}
inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
MacroAssembler::Move(dst, obj);
}
inline void MaglevAssembler::MaterialiseValueNode(Register dst,
ValueNode* value) {
switch (value->opcode()) {
case Opcode::kInt32Constant: {
int32_t int_value = value->Cast<Int32Constant>()->value();
if (Smi::IsValid(int_value)) {
Move(dst, Smi::FromInt(int_value));
} else {
movq_heap_number(dst, int_value);
}
return;
}
case Opcode::kFloat64Constant: {
double double_value = value->Cast<Float64Constant>()->value();
movq_heap_number(dst, double_value);
return;
}
default:
break;
}
DCHECK(!value->allocation().IsConstant());
DCHECK(value->allocation().IsAnyStackSlot());
using D = NewHeapNumberDescriptor;
MemOperand src = ToMemOperand(value->allocation());
switch (value->properties().value_representation()) {
case ValueRepresentation::kInt32: {
Label done;
movl(dst, src);
addl(dst, dst);
j(no_overflow, &done, Label::kNear);
// If we overflow, instead of bailing out (deopting), we change
// representation to a HeapNumber.
Cvtlsi2sd(D::GetDoubleRegisterParameter(D::kValue), src);
CallBuiltin(Builtin::kNewHeapNumber);
Move(dst, kReturnRegister0);
bind(&done);
break;
}
case ValueRepresentation::kUint32: {
Label done, tag_smi;
movl(dst, src);
// Unsigned comparison against Smi::kMaxValue.
cmpl(dst, Immediate(Smi::kMaxValue));
// If we don't fit in a Smi, instead of bailing out (deopting), we
// change representation to a HeapNumber.
j(below_equal, &tag_smi, Label::kNear);
// The value was loaded with movl, so it is zero-extended to 64 bits.
// Therefore, we can do an unsigned 32-bit conversion to double with a
// 64-bit signed conversion (Cvt_q_si2sd instead of Cvt_l_si2sd).
Cvtqsi2sd(D::GetDoubleRegisterParameter(D::kValue), src);
CallBuiltin(Builtin::kNewHeapNumber);
Move(dst, kReturnRegister0);
jmp(&done, Label::kNear);
bind(&tag_smi);
SmiTag(dst);
bind(&done);
break;
}
case ValueRepresentation::kFloat64:
Movsd(D::GetDoubleRegisterParameter(D::kValue), src);
CallBuiltin(Builtin::kNewHeapNumber);
Move(dst, kReturnRegister0);
break;
case ValueRepresentation::kTagged:
UNREACHABLE();
}
}
inline void MaglevAssembler::AssertStackSizeCorrect() {
if (v8_flags.debug_code) {
movq(kScratchRegister, rbp);
subq(kScratchRegister, rsp);
cmpq(kScratchRegister,
Immediate(code_gen_state()->stack_slots() * kSystemPointerSize +
StandardFrameConstants::kFixedFrameSizeFromFp));
Assert(equal, AbortReason::kStackAccessBelowStackPointer);
}
}
// ---
// Deferred code handling.
// ---
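
MaterialiseValueNode above turns an untagged value into a tagged one: integer constants and int32/uint32 stack values become Smis when they fit, and everything else is boxed through Builtin::kNewHeapNumber (the addl(dst, dst) is the Smi tag, and its overflow flag is the "does not fit" signal). A plain-C++ sketch of just that decision; the constants and helper names are illustrative and assume the 31-bit Smi configuration:

#include <cstdint>

// Illustration of the tagging decision implemented in assembly above.
constexpr int64_t kSmiMinValue = -(int64_t{1} << 30);     // assumes 31-bit Smis
constexpr int64_t kSmiMaxValue = (int64_t{1} << 30) - 1;

enum class Materialised { kSmi, kHeapNumber };

inline Materialised MaterialiseInt32(int32_t v) {
  // Doubling v (addl dst, dst) overflows exactly when v is outside Smi range.
  return (v >= kSmiMinValue && v <= kSmiMaxValue) ? Materialised::kSmi
                                                  : Materialised::kHeapNumber;
}

inline Materialised MaterialiseUint32(uint32_t v) {
  // Unsigned comparison against Smi::kMaxValue, as in the assembly.
  return (v <= static_cast<uint64_t>(kSmiMaxValue)) ? Materialised::kSmi
                                                    : Materialised::kHeapNumber;
}

// Float64 values are always boxed via Builtin::kNewHeapNumber.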

View File

@@ -4,6 +4,7 @@
#include "src/codegen/interface-descriptors-inl.h"
#include "src/common/globals.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/x64/maglev-assembler-x64-inl.h"
#include "src/objects/heap-number.h"
@@ -53,7 +54,8 @@ void MaglevAssembler::Allocate(RegisterSnapshot& register_snapshot,
{
SaveRegisterStateForCall save_register_state(masm, register_snapshot);
using D = AllocateDescriptor;
__ Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
__ Move(D::GetRegisterParameter(D::kRequestedSize),
Immediate(size_in_bytes));
__ CallBuiltin(builtin);
save_register_state.DefineSafepoint();
__ Move(object, kReturnRegister0);
@@ -376,6 +378,150 @@ void MaglevAssembler::TruncateDoubleToInt32(Register dst, DoubleRegister src) {
bind(*done);
}
void MaglevAssembler::Prologue(Graph* graph,
Label* deferred_flags_need_processing,
Label* deferred_call_stack_guard,
Label* deferred_call_stack_guard_return) {
code_gen_state()->set_untagged_slots(graph->untagged_stack_slots());
code_gen_state()->set_tagged_slots(graph->tagged_stack_slots());
if (v8_flags.maglev_break_on_entry) {
int3();
}
if (v8_flags.maglev_ool_prologue) {
// Call the out-of-line prologue (with parameters passed on the stack).
Push(Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
Push(Immediate(code_gen_state()->tagged_slots() * kSystemPointerSize));
CallBuiltin(Builtin::kMaglevOutOfLinePrologue);
} else {
BailoutIfDeoptimized(rbx);
// Tiering support.
// TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
// per Maglev code object on x64).
{
// Scratch registers. Don't clobber regs related to the calling
// convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
// with deferred flags code.
Register flags = rcx;
Register feedback_vector = r9;
// Load the feedback vector.
LoadTaggedPointerField(
feedback_vector,
FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset));
LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_vector, Cell::kValueOffset));
AssertFeedbackVector(feedback_vector);
LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::MAGLEV,
deferred_flags_need_processing);
}
EnterFrame(StackFrame::MAGLEV);
// Save arguments in frame.
// TODO(leszeks): Consider eliding this frame if we don't make any calls
// that could clobber these registers.
Push(kContextRegister);
Push(kJSFunctionRegister); // Callee's JS function.
Push(kJavaScriptCallArgCountRegister); // Actual argument count.
{
ASM_CODE_COMMENT_STRING(this, " Stack/interrupt check");
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real
// stack limit or tighter. By ensuring we have space until that limit
// after building the frame we can quickly precheck both at once.
Move(kScratchRegister, rsp);
// TODO(leszeks): Include a max call argument size here.
subq(kScratchRegister,
Immediate(code_gen_state()->stack_slots() * kSystemPointerSize));
cmpq(kScratchRegister,
StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
j(below, deferred_call_stack_guard);
bind(deferred_call_stack_guard_return);
}
// Initialize stack slots.
if (graph->tagged_stack_slots() > 0) {
ASM_CODE_COMMENT_STRING(this, "Initializing stack slots");
// TODO(leszeks): Consider filling with xmm + movdqa instead.
Move(rax, Immediate(0));
// Magic value. Experimentally, an unroll size of 8 doesn't seem any
// worse than fully unrolled pushes.
const int kLoopUnrollSize = 8;
int tagged_slots = graph->tagged_stack_slots();
if (tagged_slots < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill
// completely.
for (int i = 0; i < tagged_slots; ++i) {
pushq(rax);
}
} else {
// Extract the first few slots to round to the unroll size.
int first_slots = tagged_slots % kLoopUnrollSize;
for (int i = 0; i < first_slots; ++i) {
pushq(rax);
}
Move(rbx, Immediate(tagged_slots / kLoopUnrollSize));
// We enter the loop unconditionally, so make sure we need to loop at
// least once.
DCHECK_GT(tagged_slots / kLoopUnrollSize, 0);
Label loop;
bind(&loop);
for (int i = 0; i < kLoopUnrollSize; ++i) {
pushq(rax);
}
decl(rbx);
j(greater, &loop);
}
}
if (graph->untagged_stack_slots() > 0) {
// Extend rsp by the size of the remaining untagged part of the frame,
// no need to initialise these.
subq(rsp, Immediate(graph->untagged_stack_slots() * kSystemPointerSize));
}
}
}
void MaglevAssembler::DeferredPrologue(
Graph* graph, Label* deferred_flags_need_processing,
Label* deferred_call_stack_guard, Label* deferred_call_stack_guard_return) {
if (!v8_flags.maglev_ool_prologue) {
bind(deferred_call_stack_guard);
{
ASM_CODE_COMMENT_STRING(this, "Stack/interrupt call");
// Save any registers that can be referenced by RegisterInput.
// TODO(leszeks): Only push those that are used by the graph.
PushAll(RegisterInput::kAllowedRegisters);
// Push the frame size
Push(Immediate(
Smi::FromInt(code_gen_state()->stack_slots() * kSystemPointerSize)));
CallRuntime(Runtime::kStackGuardWithGap, 1);
PopAll(RegisterInput::kAllowedRegisters);
jmp(deferred_call_stack_guard_return);
}
bind(deferred_flags_need_processing);
{
ASM_CODE_COMMENT_STRING(this, "Optimized marker check");
// See PreProcessGraph.
Register flags = rcx;
Register feedback_vector = r9;
// TODO(leszeks): This could definitely be a builtin that we tail-call.
OptimizeCodeOrTailCallOptimizedCodeSlot(
flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
Trap();
}
}
}
} // namespace maglev
} // namespace internal
} // namespace v8
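
One detail of Prologue above worth spelling out: the tagged-slot fill is split into tagged_slots % kLoopUnrollSize straight-line pushes followed by a loop of tagged_slots / kLoopUnrollSize iterations that each push kLoopUnrollSize slots, so the total is exact (for example, 21 tagged slots gives 5 leading pushes plus 2 loop iterations of 8). A tiny self-contained check of that bookkeeping, not taken from the diff:

#include <cassert>

int main() {
  const int kLoopUnrollSize = 8;
  // Mirrors the "large frame" path in Prologue: it is only taken when
  // tagged_slots >= 2 * kLoopUnrollSize, so the loop runs at least once.
  for (int tagged_slots = 2 * kLoopUnrollSize; tagged_slots < 1024;
       ++tagged_slots) {
    const int first_slots = tagged_slots % kLoopUnrollSize;
    const int loop_iterations = tagged_slots / kLoopUnrollSize;
    assert(loop_iterations > 0);  // matches the DCHECK_GT in the diff
    assert(first_slots + loop_iterations * kLoopUnrollSize == tagged_slots);
  }
  return 0;
}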

View File

@@ -133,109 +133,24 @@ RegList GetGeneralRegistersUsedAsInputs(const EagerDeoptInfo* deopt_info) {
// ---
// Nodes
// ---
namespace {
template <typename NodeT>
void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm, Register reg) {
if constexpr (NodeT::kProperties.value_representation() !=
ValueRepresentation::kFloat64) {
return node->DoLoadToRegister(masm, reg);
} else {
UNREACHABLE();
}
}
template <typename NodeT>
void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm,
DoubleRegister reg) {
if constexpr (NodeT::kProperties.value_representation() ==
ValueRepresentation::kFloat64) {
return node->DoLoadToRegister(masm, reg);
} else {
UNREACHABLE();
}
}
} // namespace
void ValueNode::LoadToRegister(MaglevAssembler* masm, Register reg) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return LoadToRegisterHelper(this->Cast<Name>(), masm, reg);
VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void ValueNode::LoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return LoadToRegisterHelper(this->Cast<Name>(), masm, reg);
VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void ValueNode::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
DCHECK(is_spilled());
DCHECK(!use_double_register());
__ movq(reg,
masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
}
void ValueNode::DoLoadToRegister(MaglevAssembler* masm, DoubleRegister reg) {
DCHECK(is_spilled());
DCHECK(use_double_register());
__ Movsd(reg,
masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot())));
}
Handle<Object> ValueNode::Reify(LocalIsolate* isolate) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return this->Cast<Name>()->DoReify(isolate);
CONSTANT_VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void SmiConstant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}
Handle<Object> SmiConstant::DoReify(LocalIsolate* isolate) {
return handle(value_, isolate);
}
void SmiConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ Move(reg, Immediate(value()));
}
void Float64Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void Float64Constant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}
Handle<Object> Float64Constant::DoReify(LocalIsolate* isolate) {
return isolate->factory()->NewNumber<AllocationType::kOld>(value_);
}
void Float64Constant::DoLoadToRegister(MaglevAssembler* masm,
DoubleRegister reg) {
__ Move(reg, value());
}
void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
DefineAsConstant(vreg_state, this);
}
void Constant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}
void Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ Move(reg, object_.object());
}
Handle<Object> Constant::DoReify(LocalIsolate* isolate) {
return object_.object();
}
void DeleteProperty::AllocateVreg(MaglevVregAllocationState* vreg_state) {
using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type;
@@ -576,12 +491,6 @@ void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
}
void RootConstant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}
void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ LoadRoot(reg, index());
}
Handle<Object> RootConstant::DoReify(LocalIsolate* isolate) {
return isolate->root_handle(index());
}
void CreateEmptyArrayLiteral::AllocateVreg(
MaglevVregAllocationState* vreg_state) {
@@ -782,7 +691,8 @@ void GetTemplateObject::GenerateCode(MaglevAssembler* masm,
using D = GetTemplateObjectDescriptor;
__ Move(D::ContextRegister(), masm->native_context().object());
__ Move(D::GetRegisterParameter(D::kMaybeFeedbackVector), feedback().vector);
__ Move(D::GetRegisterParameter(D::kSlot), feedback().slot.ToInt());
__ Move(D::GetRegisterParameter(D::kSlot),
Immediate(feedback().slot.ToInt()));
__ Move(D::GetRegisterParameter(D::kShared), shared_function_info_.object());
__ CallBuiltin(Builtin::kGetTemplateObject);
masm->DefineExceptionHandlerAndLazyDeoptPoint(this);
@@ -3132,12 +3042,6 @@ void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) {
}
void Int32Constant::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {}
void Int32Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) {
__ Move(reg, Immediate(value()));
}
Handle<Object> Int32Constant::DoReify(LocalIsolate* isolate) {
return isolate->factory()->NewNumber<AllocationType::kOld>(value());
}
void Int32ToNumber::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(input());
@@ -3210,7 +3114,7 @@ void HoleyFloat64Box::GenerateCode(MaglevAssembler* masm,
DoubleRegister value = ToDoubleRegister(input());
Register object = ToRegister(result());
__ movq(object, value);
__ Move(kScratchRegister, kHoleNanInt64);
__ movq(kScratchRegister, kHoleNanInt64);
__ cmpq(object, kScratchRegister);
__ JumpToDeferredIf(
equal,