[maglev] Move Allocate and ToBoolean to MaglevAssembler

The idea is that maglev-ir.cc can later invoke higher-level
macros (AllocateString, CharCodeAt, etc.), and these inlined
computations can then be shared.
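
A rough sketch of what a call site could then look like (the
AllocateString macro and its signature here are hypothetical,
not part of this change):

  RegisterSnapshot save_registers = register_snapshot();
  __ AllocateString(save_registers, result_string, char_count);

instead of each node open-coding the raw allocation sequence.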

Bug: v8:7700
Change-Id: Icb279cc335515263e1ec29c61ba318f33143f9b8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4000484
Auto-Submit: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84029}
Victor Gomes authored on 2022-11-03 09:50:08 +01:00; committed by V8 LUCI CQ
parent 2adb1df7ab
commit 91d83cf32a
5 changed files with 218 additions and 197 deletions

View File

@@ -4755,6 +4755,7 @@ v8_source_set("v8_base_without_compiler") {
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-assembler.cc",
"src/maglev/maglev-code-generator.cc",
"src/maglev/maglev-compilation-info.cc",
"src/maglev/maglev-compilation-unit.cc",

View File

@@ -18,6 +18,9 @@ namespace v8 {
namespace internal {
namespace maglev {
ZoneLabelRef::ZoneLabelRef(MaglevAssembler* masm)
: ZoneLabelRef(masm->compilation_info()->zone()) {}
void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true,
BasicBlock* if_false, BasicBlock* next_block) {
// We don't have any branch probability information, so try to jump

View File

@@ -0,0 +1,141 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/interface-descriptors-inl.h"
#include "src/maglev/maglev-assembler-inl.h"
#include "src/objects/heap-number.h"
namespace v8 {
namespace internal {
namespace maglev {
#define __ masm->
void MaglevAssembler::Allocate(RegisterSnapshot& register_snapshot,
Register object, int size_in_bytes,
AllocationType alloc_type,
AllocationAlignment alignment) {
// TODO(victorgomes): Call the runtime for large object allocation.
// TODO(victorgomes): Support double alignment.
DCHECK_EQ(alignment, kTaggedAligned);
size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
if (v8_flags.single_generation) {
alloc_type = AllocationType::kOld;
}
bool in_new_space = alloc_type == AllocationType::kYoung;
ExternalReference top =
in_new_space
? ExternalReference::new_space_allocation_top_address(isolate_)
: ExternalReference::old_space_allocation_top_address(isolate_);
ExternalReference limit =
in_new_space
? ExternalReference::new_space_allocation_limit_address(isolate_)
: ExternalReference::old_space_allocation_limit_address(isolate_);
ZoneLabelRef done(this);
Register new_top = kScratchRegister;
// Check if there is enough space.
Move(object, ExternalReferenceAsOperand(top));
leaq(new_top, Operand(object, size_in_bytes));
cmpq(new_top, ExternalReferenceAsOperand(limit));
// Otherwise call runtime.
JumpToDeferredIf(
greater_equal,
[](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
Register object, Builtin builtin, int size_in_bytes,
ZoneLabelRef done) {
// Remove {object} from snapshot, since it is the returned allocated
// HeapObject.
register_snapshot.live_registers.clear(object);
register_snapshot.live_tagged_registers.clear(object);
{
SaveRegisterStateForCall save_register_state(masm, register_snapshot);
using D = AllocateDescriptor;
__ Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
__ CallBuiltin(builtin);
save_register_state.DefineSafepoint();
__ Move(object, kReturnRegister0);
}
__ jmp(*done);
},
register_snapshot, object,
in_new_space ? Builtin::kAllocateRegularInYoungGeneration
: Builtin::kAllocateRegularInOldGeneration,
size_in_bytes, done);
// Store new top and tag object.
movq(ExternalReferenceAsOperand(top), new_top);
addq(object, Immediate(kHeapObjectTag));
bind(*done);
}
void MaglevAssembler::ToBoolean(Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false,
bool fallthrough_when_true) {
Register map = kScratchRegister;
// Check if {{value}} is Smi.
CheckSmi(value);
JumpToDeferredIf(
zero,
[](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false) {
// Check if {value} is not zero.
__ SmiCompare(value, Smi::FromInt(0));
__ j(equal, *is_false);
__ jmp(*is_true);
},
value, is_true, is_false);
// Check if {{value}} is false.
CompareRoot(value, RootIndex::kFalseValue);
j(equal, *is_false);
// Check if {{value}} is empty string.
CompareRoot(value, RootIndex::kempty_string);
j(equal, *is_false);
// Check if {{value}} is undetectable.
LoadMap(map, value);
testl(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsUndetectableBit::kMask));
j(not_zero, *is_false);
// Check if {{value}} is a HeapNumber.
CompareRoot(map, RootIndex::kHeapNumberMap);
JumpToDeferredIf(
equal,
[](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false) {
// Sets scratch register to 0.0.
__ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
// Sets ZF if equal to 0.0, -0.0 or NaN.
__ Ucomisd(kScratchDoubleReg,
FieldOperand(value, HeapNumber::kValueOffset));
__ j(zero, *is_false);
__ jmp(*is_true);
},
value, is_true, is_false);
// Check if {{value}} is a BigInt.
CompareRoot(map, RootIndex::kBigIntMap);
JumpToDeferredIf(
equal,
[](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false) {
__ testl(FieldOperand(value, BigInt::kBitfieldOffset),
Immediate(BigInt::LengthBits::kMask));
__ j(zero, *is_false);
__ jmp(*is_true);
},
value, is_true, is_false);
// Otherwise true.
if (!fallthrough_when_true) {
jmp(*is_true);
}
}
} // namespace maglev
} // namespace internal
} // namespace v8
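
With Allocate as a MaglevAssembler member, a maglev-ir.cc call site
shrinks to a single macro invocation (see the maglev-ir.cc hunks
below), e.g. for boxing a double into a new HeapNumber:

  RegisterSnapshot save_registers = register_snapshot();
  save_registers.live_double_registers.set(value);
  __ Allocate(save_registers, object, HeapNumber::kSize);
  __ LoadRoot(kScratchRegister, RootIndex::kHeapNumberMap);
  __ StoreTaggedField(FieldOperand(object, HeapObject::kMapOffset),
                      kScratchRegister);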

View File

@@ -12,6 +12,28 @@ namespace v8 {
namespace internal {
namespace maglev {
class MaglevAssembler;
// Label allowed to be passed to deferred code.
class ZoneLabelRef {
public:
explicit ZoneLabelRef(Zone* zone) : label_(zone->New<Label>()) {}
explicit inline ZoneLabelRef(MaglevAssembler* masm);
static ZoneLabelRef UnsafeFromLabelPointer(Label* label) {
// This is an unsafe operation, {label} must be zone allocated.
return ZoneLabelRef(label);
}
Label* operator*() { return label_; }
private:
Label* label_;
// Unsafe constructor. {label} must be zone allocated.
explicit ZoneLabelRef(Label* label) : label_(label) {}
};
class MaglevAssembler : public MacroAssembler {
public:
explicit MaglevAssembler(Isolate* isolate, MaglevCodeGenState* code_gen_state)
@@ -39,11 +61,19 @@ class MaglevAssembler : public MacroAssembler {
return GetFramePointerOffsetForStackSlot(index);
}
void Allocate(RegisterSnapshot& register_snapshot, Register result,
int size_in_bytes,
AllocationType alloc_type = AllocationType::kYoung,
AllocationAlignment alignment = kTaggedAligned);
inline void Branch(Condition condition, BasicBlock* if_true,
BasicBlock* if_false, BasicBlock* next_block);
inline void PushInput(const Input& input);
inline Register FromAnyToRegister(const Input& input, Register scratch);
void ToBoolean(Register value, ZoneLabelRef is_true, ZoneLabelRef is_false,
bool fallthrough_when_true);
inline void DefineLazyDeoptPoint(LazyDeoptInfo* info);
inline void DefineExceptionHandlerPoint(NodeBase* node);
inline void DefineExceptionHandlerAndLazyDeoptPoint(NodeBase* node);
@@ -84,25 +114,46 @@ class MaglevAssembler : public MacroAssembler {
   MaglevCodeGenState* const code_gen_state_;
 };
 
-// Label allowed to be passed to deferred code.
-class ZoneLabelRef {
- public:
-  explicit ZoneLabelRef(Zone* zone) : label_(zone->New<Label>()) {}
-  explicit inline ZoneLabelRef(MaglevAssembler* masm)
-      : ZoneLabelRef(masm->compilation_info()->zone()) {}
-
-  static ZoneLabelRef UnsafeFromLabelPointer(Label* label) {
-    // This is an unsafe operation, {label} must be zone allocated.
-    return ZoneLabelRef(label);
-  }
-
-  Label* operator*() { return label_; }
-
- private:
-  Label* label_;
-
-  // Unsafe constructor. {label} must be zone allocated.
-  explicit ZoneLabelRef(Label* label) : label_(label) {}
-};
+class SaveRegisterStateForCall {
+ public:
+  SaveRegisterStateForCall(MaglevAssembler* masm, RegisterSnapshot snapshot)
+      : masm(masm), snapshot_(snapshot) {
+    masm->PushAll(snapshot_.live_registers);
+    masm->PushAll(snapshot_.live_double_registers, kDoubleSize);
+  }
+
+  ~SaveRegisterStateForCall() {
+    masm->PopAll(snapshot_.live_double_registers, kDoubleSize);
+    masm->PopAll(snapshot_.live_registers);
+  }
+
+  MaglevSafepointTableBuilder::Safepoint DefineSafepoint() {
+    // TODO(leszeks): Avoid emitting safepoints when there are no registers to
+    // save.
+    auto safepoint = masm->safepoint_table_builder()->DefineSafepoint(masm);
+    int pushed_reg_index = 0;
+    for (Register reg : snapshot_.live_registers) {
+      if (snapshot_.live_tagged_registers.has(reg)) {
+        safepoint.DefineTaggedRegister(pushed_reg_index);
+      }
+      pushed_reg_index++;
+    }
+    int num_pushed_double_reg = snapshot_.live_double_registers.Count();
+    safepoint.SetNumPushedRegisters(pushed_reg_index + num_pushed_double_reg);
+    return safepoint;
+  }
+
+  MaglevSafepointTableBuilder::Safepoint DefineSafepointWithLazyDeopt(
+      LazyDeoptInfo* lazy_deopt_info) {
+    lazy_deopt_info->set_deopting_call_return_pc(
+        masm->pc_offset_for_safepoint());
+    masm->code_gen_state()->PushLazyDeopt(lazy_deopt_info);
+    return DefineSafepoint();
+  }
+
+ private:
+  MaglevAssembler* masm;
+  RegisterSnapshot snapshot_;
+};
 
 }  // namespace maglev
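
Deferred code that calls out to a builtin (such as the allocation
slow path in maglev-assembler.cc above) uses this helper roughly as:

  {
    SaveRegisterStateForCall save_register_state(masm, register_snapshot);
    __ CallBuiltin(builtin);
    save_register_state.DefineSafepoint();
    __ Move(object, kReturnRegister0);
  }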

View File

@@ -100,52 +100,6 @@ void UseFixed(Input& input, Register reg) {
reg.code(), GetVirtualRegister(input.node()));
}
// ---
// Code gen helpers.
// ---
class SaveRegisterStateForCall {
public:
SaveRegisterStateForCall(MaglevAssembler* masm, RegisterSnapshot snapshot)
: masm(masm), snapshot_(snapshot) {
__ PushAll(snapshot_.live_registers);
__ PushAll(snapshot_.live_double_registers, kDoubleSize);
}
~SaveRegisterStateForCall() {
__ PopAll(snapshot_.live_double_registers, kDoubleSize);
__ PopAll(snapshot_.live_registers);
}
MaglevSafepointTableBuilder::Safepoint DefineSafepoint() {
// TODO(leszeks): Avoid emitting safepoints when there are no registers to
// save.
auto safepoint = masm->safepoint_table_builder()->DefineSafepoint(masm);
int pushed_reg_index = 0;
for (Register reg : snapshot_.live_registers) {
if (snapshot_.live_tagged_registers.has(reg)) {
safepoint.DefineTaggedRegister(pushed_reg_index);
}
pushed_reg_index++;
}
int num_pushed_double_reg = snapshot_.live_double_registers.Count();
safepoint.SetNumPushedRegisters(pushed_reg_index + num_pushed_double_reg);
return safepoint;
}
MaglevSafepointTableBuilder::Safepoint DefineSafepointWithLazyDeopt(
LazyDeoptInfo* lazy_deopt_info) {
lazy_deopt_info->set_deopting_call_return_pc(
masm->pc_offset_for_safepoint());
masm->code_gen_state()->PushLazyDeopt(lazy_deopt_info);
return DefineSafepoint();
}
private:
MaglevAssembler* masm;
RegisterSnapshot snapshot_;
};
#ifdef DEBUG
RegList GetGeneralRegistersUsedAsInputs(const EagerDeoptInfo* deopt_info) {
RegList regs;
@@ -163,134 +117,6 @@ RegList GetGeneralRegistersUsedAsInputs(const EagerDeoptInfo* deopt_info) {
// when non-empty.
#define DCHECK_REGLIST_EMPTY(...) DCHECK_EQ((__VA_ARGS__), RegList{})
// ---
// Inlined computations.
// ---
void AllocateRaw(MaglevAssembler* masm, RegisterSnapshot& register_snapshot,
Register object, int size_in_bytes,
AllocationType alloc_type = AllocationType::kYoung,
AllocationAlignment alignment = kTaggedAligned) {
// TODO(victorgomes): Call the runtime for large object allocation.
// TODO(victorgomes): Support double alignment.
DCHECK_EQ(alignment, kTaggedAligned);
size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
if (v8_flags.single_generation) {
alloc_type = AllocationType::kOld;
}
bool in_new_space = alloc_type == AllocationType::kYoung;
Isolate* isolate = masm->isolate();
ExternalReference top =
in_new_space
? ExternalReference::new_space_allocation_top_address(isolate)
: ExternalReference::old_space_allocation_top_address(isolate);
ExternalReference limit =
in_new_space
? ExternalReference::new_space_allocation_limit_address(isolate)
: ExternalReference::old_space_allocation_limit_address(isolate);
ZoneLabelRef done(masm);
Register new_top = kScratchRegister;
// Check if there is enough space.
__ Move(object, __ ExternalReferenceAsOperand(top));
__ leaq(new_top, Operand(object, size_in_bytes));
__ cmpq(new_top, __ ExternalReferenceAsOperand(limit));
// Otherwise call runtime.
__ JumpToDeferredIf(
greater_equal,
[](MaglevAssembler* masm, RegisterSnapshot register_snapshot,
Register object, Builtin builtin, int size_in_bytes,
ZoneLabelRef done) {
// Remove {object} from snapshot, since it is the returned allocated
// HeapObject.
register_snapshot.live_registers.clear(object);
register_snapshot.live_tagged_registers.clear(object);
{
SaveRegisterStateForCall save_register_state(masm, register_snapshot);
using D = AllocateDescriptor;
__ Move(D::GetRegisterParameter(D::kRequestedSize), size_in_bytes);
__ CallBuiltin(builtin);
save_register_state.DefineSafepoint();
__ Move(object, kReturnRegister0);
}
__ jmp(*done);
},
register_snapshot, object,
in_new_space ? Builtin::kAllocateRegularInYoungGeneration
: Builtin::kAllocateRegularInOldGeneration,
size_in_bytes, done);
// Store new top and tag object.
__ movq(__ ExternalReferenceAsOperand(top), new_top);
__ addq(object, Immediate(kHeapObjectTag));
__ bind(*done);
}
void EmitToBoolean(MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false, bool fallthrough_when_true) {
Register map = kScratchRegister;
// Check if {{value}} is Smi.
__ CheckSmi(value);
__ JumpToDeferredIf(
zero,
[](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false) {
// Check if {value} is not zero.
__ SmiCompare(value, Smi::FromInt(0));
__ j(equal, *is_false);
__ jmp(*is_true);
},
value, is_true, is_false);
// Check if {{value}} is false.
__ CompareRoot(value, RootIndex::kFalseValue);
__ j(equal, *is_false);
// Check if {{value}} is empty string.
__ CompareRoot(value, RootIndex::kempty_string);
__ j(equal, *is_false);
// Check if {{value}} is undetectable.
__ LoadMap(map, value);
__ testl(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsUndetectableBit::kMask));
__ j(not_zero, *is_false);
// Check if {{value}} is a HeapNumber.
__ CompareRoot(map, RootIndex::kHeapNumberMap);
__ JumpToDeferredIf(
equal,
[](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false) {
// Sets scratch register to 0.0.
__ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
// Sets ZF if equal to 0.0, -0.0 or NaN.
__ Ucomisd(kScratchDoubleReg,
FieldOperand(value, HeapNumber::kValueOffset));
__ j(zero, *is_false);
__ jmp(*is_true);
},
value, is_true, is_false);
// Check if {{value}} is a BigInt.
__ CompareRoot(map, RootIndex::kBigIntMap);
__ JumpToDeferredIf(
equal,
[](MaglevAssembler* masm, Register value, ZoneLabelRef is_true,
ZoneLabelRef is_false) {
__ testl(FieldOperand(value, BigInt::kBitfieldOffset),
Immediate(BigInt::LengthBits::kMask));
__ j(zero, *is_false);
__ jmp(*is_true);
},
value, is_true, is_false);
// Otherwise true.
if (!fallthrough_when_true) {
__ jmp(*is_true);
}
}
// ---
// Print
// ---
@@ -1003,7 +829,7 @@ void CreateEmptyObjectLiteral::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
   Register object = ToRegister(result());
   RegisterSnapshot save_registers = register_snapshot();
-  AllocateRaw(masm, save_registers, object, map().instance_size());
+  __ Allocate(save_registers, object, map().instance_size());
   __ Move(kScratchRegister, map().object());
   __ StoreTaggedField(FieldOperand(object, HeapObject::kMapOffset),
                       kScratchRegister);
@@ -1696,8 +1522,7 @@ void InlinedBuiltinStringFromCharCode::AllocateVreg(
 void InlinedBuiltinStringFromCharCode::AllocateTwoByteString(
     MaglevAssembler* masm, Register result_string,
     RegisterSnapshot save_registers) {
-  AllocateRaw(masm, save_registers, result_string,
-              SeqTwoByteString::SizeFor(1));
+  __ Allocate(save_registers, result_string, SeqTwoByteString::SizeFor(1));
   __ LoadRoot(kScratchRegister, RootIndex::kStringMap);
   __ StoreTaggedField(FieldOperand(result_string, HeapObject::kMapOffset),
                       kScratchRegister);
@@ -2240,7 +2065,7 @@ void StringAt::GenerateCode(MaglevAssembler* masm,
     character = scratch1;
   }
   save_registers.live_registers.set(character);
-  AllocateRaw(masm, save_registers, result_string,
-              SeqTwoByteString::SizeFor(1));
+  __ Allocate(save_registers, result_string,
+              SeqTwoByteString::SizeFor(1));
   __ LoadRoot(kScratchRegister, RootIndex::kStringMap);
   __ StoreTaggedField(FieldOperand(result_string, HeapObject::kMapOffset),
@@ -3116,7 +2941,7 @@ void Float64Box::GenerateCode(MaglevAssembler* masm,
   // call might trash it.
   RegisterSnapshot save_registers = register_snapshot();
   save_registers.live_double_registers.set(value);
-  AllocateRaw(masm, save_registers, object, HeapNumber::kSize);
+  __ Allocate(save_registers, object, HeapNumber::kSize);
   __ LoadRoot(kScratchRegister, RootIndex::kHeapNumberMap);
   __ StoreTaggedField(FieldOperand(object, HeapObject::kMapOffset),
                       kScratchRegister);
@@ -3223,7 +3048,7 @@ void ToBoolean::GenerateCode(MaglevAssembler* masm,
   ZoneLabelRef object_is_true(zone), object_is_false(zone);
   // TODO(leszeks): We're likely to be calling this on an existing boolean --
   // maybe that's a case we should fast-path here and re-use that boolean value?
-  EmitToBoolean(masm, object, object_is_true, object_is_false, true);
+  __ ToBoolean(object, object_is_true, object_is_false, true);
   __ bind(*object_is_true);
   __ LoadRoot(return_value, RootIndex::kTrueValue);
   __ jmp(&done, Label::kNear);
@@ -3243,7 +3068,7 @@ void ToBooleanLogicalNot::GenerateCode(MaglevAssembler* masm,
   Label done;
   Zone* zone = masm->compilation_info()->zone();
   ZoneLabelRef object_is_true(zone), object_is_false(zone);
-  EmitToBoolean(masm, object, object_is_true, object_is_false, true);
+  __ ToBoolean(object, object_is_true, object_is_false, true);
   __ bind(*object_is_true);
   __ LoadRoot(return_value, RootIndex::kFalseValue);
   __ jmp(&done, Label::kNear);
@@ -4470,8 +4295,8 @@ void BranchIfToBooleanTrue::GenerateCode(MaglevAssembler* masm,
   ZoneLabelRef false_label =
       ZoneLabelRef::UnsafeFromLabelPointer(if_false()->label());
   bool fallthrough_when_true = (if_true() == state.next_block());
-  EmitToBoolean(masm, ToRegister(condition_input()), true_label, false_label,
-                fallthrough_when_true);
+  __ ToBoolean(ToRegister(condition_input()), true_label, false_label,
+               fallthrough_when_true);
 }
} // namespace maglev