[maglev] Elide hasOwnProperty on fast for-in

Inside a fast for-in (one driven by the enum cache), fold calls to the
receiver's hasOwnProperty with the current for-in key to the true constant,
and compile keyed loads of receiver[key] into direct field loads through the
enum cache indices, guarded by a map check against the for-in's cache type.

Bug: v8:7700
Change-Id: I48feba3e38967ba38873efdef6827d2218fbc426
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4184202
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85683}
Authored by Leszek Swirski on 2023-02-06 11:36:34 +01:00; committed by V8 LUCI CQ
parent d12417f875
commit 58076e4263
10 changed files with 461 additions and 35 deletions

src/maglev/arm64/maglev-assembler-arm64-inl.h

@@ -34,6 +34,19 @@ constexpr Condition ConditionFor(Operation operation) {
}
}
inline int ShiftFromScale(int n) {
switch (n) {
case 1:
return 0;
case 2:
return 1;
case 4:
return 2;
default:
UNREACHABLE();
}
}
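// For example (illustrative): a scale of 4 maps to LSL #2, so
//   Add(dst, base, Operand(index, LSL, ShiftFromScale(4)));
// computes dst = base + index * 4.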
class MaglevAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(MaglevAssembler* masm) : wrapped_scope_(masm) {
@@ -352,6 +365,18 @@ inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
Add(data_pointer, data_pointer, base);
}
inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result,
Register object,
Register index, int scale,
int offset) {
if (scale == 1) {
Add(result, object, index);
} else {
Add(result, object, Operand(index, LSL, ShiftFromScale(scale)));
}
MacroAssembler::LoadTaggedField(result, FieldMemOperand(result, offset));
}
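// In effect (sketch): this loads, and with pointer compression decompresses,
// the tagged value at object + index * scale + offset - kHeapObjectTag; the
// scale == 1 special case merely avoids an LSL #0 operand.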
inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result,
Register object,
int offset) {
@@ -472,6 +497,9 @@ inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
Mov(dst, Operand(src.W(), SXTW));
}
inline void MaglevAssembler::NegateInt32(Register val) {
Neg(val.W(), val.W());
}
template <typename NodeT>
inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
@@ -585,6 +613,23 @@ inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
CompareAndBranch(r1.W(), r2.W(), cond, target);
}
inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
Condition cond,
Label* target,
Label::Distance distance) {
CompareAndBranch(r1.W(), Immediate(value), cond, target);
}
inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
Register r1, int32_t mask, Label* target, Label::Distance distance) {
TestAndBranchIfAnySet(r1.W(), mask, target);
}
inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
Register r1, int32_t mask, Label* target, Label::Distance distance) {
TestAndBranchIfAllClear(r1.W(), mask, target);
}
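// These are used below to test tag bits of smi-encoded values, e.g. (sketch):
//   TestInt32AndJumpIfAnySet(index, 1 << kSmiTagSize, &is_double);
// branches iff the is_double bit of a smi-tagged field index is set.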
inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
Register heap_number) {
Ldr(result, FieldMemOperand(heap_number, HeapNumber::kValueOffset));

src/maglev/arm64/maglev-ir-arm64.cc

@@ -6,6 +6,7 @@
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/register-arm64.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#include "src/maglev/maglev-assembler-inl.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
@@ -1711,13 +1712,15 @@ void GenerateTypedArrayLoad(MaglevAssembler* masm, NodeT* node, Register object,
if constexpr (std::is_same_v<ResultReg, Register>) {
if (IsSignedIntTypedArrayElementsKind(kind)) {
int element_size = ElementsKindSize(kind);
__ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2));
__ Add(data_pointer, data_pointer,
Operand(index, LSL, ShiftFromScale(element_size)));
__ LoadSignedField(result_reg.W(), MemOperand(data_pointer),
element_size);
} else {
DCHECK(IsUnsignedIntTypedArrayElementsKind(kind));
int element_size = ElementsKindSize(kind);
__ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2));
__ Add(data_pointer, data_pointer,
Operand(index, LSL, ShiftFromScale(element_size)));
__ LoadUnsignedField(result_reg.W(), MemOperand(data_pointer),
element_size);
}

src/maglev/maglev-assembler-inl.h

@@ -5,6 +5,8 @@
#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#include "src/maglev/maglev-assembler.h"
#ifdef V8_TARGET_ARCH_ARM64
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_X64
@@ -242,6 +244,27 @@ inline void MaglevAssembler::Branch(Condition condition, Label* if_true,
}
}
inline void MaglevAssembler::LoadTaggedField(Register result,
MemOperand operand) {
MacroAssembler::LoadTaggedField(result, operand);
}
inline void MaglevAssembler::LoadTaggedField(Register result, Register object,
int offset) {
MacroAssembler::LoadTaggedField(result, FieldMemOperand(object, offset));
}
inline void MaglevAssembler::LoadTaggedSignedField(Register result,
MemOperand operand) {
MacroAssembler::LoadTaggedField(result, operand);
}
inline void MaglevAssembler::LoadTaggedSignedField(Register result,
Register object,
int offset) {
MacroAssembler::LoadTaggedField(result, FieldMemOperand(object, offset));
}
} // namespace maglev
} // namespace internal
} // namespace v8

src/maglev/maglev-assembler.h

@@ -98,6 +98,13 @@ class MaglevAssembler : public MacroAssembler {
Register FromAnyToRegister(const Input& input, Register scratch);
inline void LoadTaggedField(Register result, MemOperand operand);
inline void LoadTaggedField(Register result, Register object, int offset);
inline void LoadTaggedSignedField(Register result, MemOperand operand);
inline void LoadTaggedSignedField(Register result, Register object,
int offset);
inline void LoadTaggedFieldByIndex(Register result, Register object,
Register index, int scale, int offset);
inline void LoadBoundedSizeFromObject(Register result, Register object,
int offset);
inline void LoadExternalPointerField(Register result, MemOperand operand);
@@ -187,6 +194,7 @@ class MaglevAssembler : public MacroAssembler {
inline void LoadByte(Register dst, MemOperand src);
inline void SignExtend32To64Bits(Register dst, Register src);
inline void NegateInt32(Register val);
template <typename NodeT>
inline void DeoptIfBufferDetached(Register array, Register scratch,
@@ -226,6 +234,14 @@ class MaglevAssembler : public MacroAssembler {
inline void CompareInt32AndJumpIf(Register r1, Register r2, Condition cond,
Label* target,
Label::Distance distance = Label::kFar);
inline void CompareInt32AndJumpIf(Register r1, int32_t value, Condition cond,
Label* target,
Label::Distance distance = Label::kFar);
inline void TestInt32AndJumpIfAnySet(Register r1, int32_t mask, Label* target,
Label::Distance distance = Label::kFar);
inline void TestInt32AndJumpIfAllClear(
Register r1, int32_t mask, Label* target,
Label::Distance distance = Label::kFar);
inline void Int32ToDouble(DoubleRegister result, Register n);
inline void SmiToDouble(DoubleRegister result, Register smi);

src/maglev/maglev-graph-builder.cc

@@ -1639,6 +1639,7 @@ void MaglevGraphBuilder::BuildCheckMaps(
if (merger.emit_check_with_migration()) {
AddNewNode<CheckMapsWithMigration>({object}, merger.intersect_set(),
GetCheckType(known_info->type));
MarkPossibleMapMigration();
} else {
AddNewNode<CheckMaps>({object}, merger.intersect_set(),
GetCheckType(known_info->type));
@@ -2546,6 +2547,28 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
broker()->GetFeedbackForPropertyAccess(
feedback_source, compiler::AccessMode::kLoad, base::nullopt);
if (current_for_in_state.index != nullptr &&
current_for_in_state.receiver == object &&
current_for_in_state.key == current_interpreter_frame_.accumulator()) {
if (current_for_in_state.receiver_needs_map_check) {
auto* receiver_map =
AddNewNode<LoadTaggedField>({object}, HeapObject::kMapOffset);
AddNewNode<CheckDynamicValue>(
{receiver_map, current_for_in_state.cache_type});
current_for_in_state.receiver_needs_map_check = false;
}
// TODO(leszeks): Cache the indices across the loop.
auto* cache_array = AddNewNode<LoadTaggedField>(
{current_for_in_state.enum_cache}, EnumCache::kIndicesOffset);
// TODO(leszeks): Do we need to check that the indices aren't empty?
// TODO(leszeks): Cache the field index per iteration.
auto* field_index = AddNewNode<LoadFixedArrayElement>(
{cache_array, current_for_in_state.index});
SetAccumulator(
AddNewNode<LoadTaggedFieldByFieldIndex>({object, field_index}));
return;
}
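// In effect, a keyed load of receiver[key] inside a fast for-in becomes
// (sketch):
//   indices = enum_cache.indices              // EnumCache::kIndicesOffset
//   field_index = indices[for_in_index]       // LoadFixedArrayElement
//   acc = LoadTaggedFieldByFieldIndex(receiver, field_index)
// bypassing the generic keyed-load IC entirely.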
switch (processed_feedback.kind()) {
case compiler::ProcessedFeedback::kInsufficient:
EmitUnconditionalDeopt(
@@ -3255,6 +3278,21 @@ ReduceResult MaglevGraphBuilder::TryReduceFunctionPrototypeCall(
return BuildGenericCall(receiver, context, Call::TargetType::kAny, args);
}
ReduceResult MaglevGraphBuilder::TryReduceObjectPrototypeHasOwnProperty(
compiler::JSFunctionRef target, CallArguments& args) {
// We can't fold hasOwnProperty when the receiver is null or undefined.
if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
return ReduceResult::Fail();
}
if (args.receiver() != current_for_in_state.receiver) {
return ReduceResult::Fail();
}
if (args.count() != 1 || args[0] != current_for_in_state.key) {
return ReduceResult::Fail();
}
return GetRootConstant(RootIndex::kTrueValue);
}
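// The guards above restrict the fold to the canonical pattern (illustrative
// JS; the for-in state must be tracking exactly this receiver and key):
//   for (var key in o) {
//     if (o.hasOwnProperty(key)) { ... }  // folded to the true root constant
//   }
// This is sound because keys produced from the receiver's enum cache are
// always own properties, and the cached map is re-checked after possible
// migrations.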
ReduceResult MaglevGraphBuilder::TryReduceMathPow(
compiler::JSFunctionRef target, CallArguments& args) {
if (args.count() < 2) {
@@ -4663,12 +4701,27 @@ void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node,
JumpType jump_type) {
int fallthrough_offset = next_offset();
int jump_offset = iterator_.GetJumpTargetOffset();
if (IsConstantNode(node->opcode())) {
bool constant_is_true = FromConstantToBool(local_isolate(), node);
bool is_jump_taken = constant_is_true == (jump_type == kJumpIfTrue);
if (is_jump_taken) {
BasicBlock* block = FinishBlock<Jump>({}, &jump_targets_[jump_offset]);
MergeDeadIntoFrameState(fallthrough_offset);
MergeIntoFrameState(block, jump_offset);
} else {
MergeDeadIntoFrameState(jump_offset);
}
return;
}
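// E.g. (illustrative): a branch on a known-true constant with jump_type ==
// kJumpIfTrue now emits a single unconditional Jump and records the untaken
// successor as dead, instead of materializing a runtime BranchIfToBooleanTrue.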
BasicBlockRef* true_target = jump_type == kJumpIfTrue
? &jump_targets_[jump_offset]
: &jump_targets_[fallthrough_offset];
BasicBlockRef* false_target = jump_type == kJumpIfFalse
? &jump_targets_[jump_offset]
: &jump_targets_[fallthrough_offset];
BasicBlock* block =
FinishBlock<BranchIfToBooleanTrue>({node}, true_target, false_target);
if (jump_type == kJumpIfTrue) {
@@ -4767,6 +4820,7 @@ void MaglevGraphBuilder::VisitForInPrepare() {
ForInHint hint = broker()->GetFeedbackForForIn(feedback_source);
current_for_in_state = ForInState();
switch (hint) {
case ForInHint::kNone:
case ForInHint::kEnumCacheKeysAndIndices:
@@ -4781,6 +4835,7 @@
{descriptor_array}, DescriptorArray::kEnumCacheOffset);
auto* cache_array =
AddNewNode<LoadTaggedField>({enum_cache}, EnumCache::kKeysOffset);
current_for_in_state.enum_cache = enum_cache;
auto* cache_length = AddNewNode<LoadEnumCacheLength>({enumerator});
@@ -4804,6 +4859,8 @@
// cache_array, and cache_length respectively. Cache type is already set
// above, so store the remaining two now.
StoreRegisterPair({cache_array_reg, cache_length_reg}, result);
// Force a conversion to Int32 for the cache length value.
GetInt32(cache_length_reg);
break;
}
}
@@ -4811,10 +4868,14 @@
void MaglevGraphBuilder::VisitForInContinue() {
// ForInContinue <index> <cache_length>
ValueNode* index = LoadRegisterTagged(0);
ValueNode* cache_length = LoadRegisterTagged(1);
// TODO(verwaest): Fold with the next instruction.
SetAccumulator(AddNewNode<TaggedNotEqual>({index, cache_length}));
ValueNode* index = LoadRegisterInt32(0);
ValueNode* cache_length = LoadRegisterInt32(1);
if (TryBuildBranchFor<BranchIfInt32Compare>({index, cache_length},
Operation::kLessThan)) {
return;
}
SetAccumulator(
AddNewNode<Int32NodeFor<Operation::kLessThan>>({index, cache_length}));
}
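// ForInContinue now compares the untagged int32 index and cache length
// directly; when the next bytecode consumes the result as a branch,
// TryBuildBranchFor fuses the comparison (sketch):
//   BranchIfInt32Compare(index, cache_length, kLessThan) -> body / exit
// so the common loop back-edge never materializes a boolean.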
void MaglevGraphBuilder::VisitForInNext() {
@@ -4839,7 +4900,26 @@
auto* receiver_map =
AddNewNode<LoadTaggedField>({receiver}, HeapObject::kMapOffset);
AddNewNode<CheckDynamicValue>({receiver_map, cache_type});
SetAccumulator(AddNewNode<LoadFixedArrayElement>({cache_array, index}));
auto* key = AddNewNode<LoadFixedArrayElement>({cache_array, index});
SetAccumulator(key);
current_for_in_state.receiver = receiver;
if (ToObject* to_object =
current_for_in_state.receiver->TryCast<ToObject>()) {
current_for_in_state.receiver = to_object->value_input().node();
}
current_for_in_state.receiver_needs_map_check = false;
current_for_in_state.cache_type = cache_type;
current_for_in_state.key = key;
if (hint == ForInHint::kEnumCacheKeysAndIndices) {
current_for_in_state.index = index;
}
// We know that the enum cache entry is not undefined, so skip over the
// next JumpIfUndefined.
DCHECK_EQ(iterator_.next_bytecode(),
interpreter::Bytecode::kJumpIfUndefined);
iterator_.Advance();
MergeDeadIntoFrameState(iterator_.GetJumpTargetOffset());
break;
}
case ForInHint::kAny: {
@@ -4856,6 +4936,7 @@
void MaglevGraphBuilder::VisitForInStep() {
ValueNode* index = LoadRegisterInt32(0);
SetAccumulator(AddNewNode<Int32NodeFor<Operation::kIncrement>>({index}));
current_for_in_state = ForInState();
}
void MaglevGraphBuilder::VisitSetPendingMessage() {

src/maglev/maglev-graph-builder.h

@@ -1117,6 +1117,10 @@ class MaglevGraphBuilder {
: nullptr);
}
void MarkPossibleMapMigration() {
current_for_in_state.receiver_needs_map_check = true;
}
void MarkPossibleSideEffect() {
// If there was a potential side effect, invalidate the previous checkpoint.
latest_checkpointed_frame_.reset();
@@ -1142,6 +1146,9 @@ class MaglevGraphBuilder {
// clear those.
known_node_aspects().loaded_properties.clear();
known_node_aspects().loaded_context_slots.clear();
// Any side effect could also be a map migration.
MarkPossibleMapMigration();
}
int next_offset() const {
@@ -1278,6 +1285,7 @@ class MaglevGraphBuilder {
V(DataViewPrototypeGetFloat64) \
V(DataViewPrototypeSetFloat64) \
V(FunctionPrototypeCall) \
V(ObjectPrototypeHasOwnProperty) \
V(MathPow) \
V(StringFromCharCode) \
V(StringPrototypeCharCodeAt) \
@@ -1589,6 +1597,16 @@
BasicBlock* current_block_ = nullptr;
base::Optional<InterpretedDeoptFrame> latest_checkpointed_frame_;
SourcePosition current_source_position_;
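// Tracks the innermost fast for-in loop: ForInPrepare records enum_cache,
// ForInNext records receiver, key, cache_type and (when indices feedback is
// available) index, ForInStep resets the state, and MarkPossibleMapMigration
// sets receiver_needs_map_check whenever a side effect could migrate the
// receiver's map.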
struct ForInState {
ValueNode* receiver = nullptr;
ValueNode* cache_type = nullptr;
ValueNode* enum_cache = nullptr;
ValueNode* key = nullptr;
ValueNode* index = nullptr;
bool receiver_needs_map_check = false;
};
// TODO(leszeks): Allow having a stack of these.
ForInState current_for_in_state = ForInState();
BasicBlockRef* jump_targets_;
MergePointInterpreterFrameState** merge_states_;

src/maglev/maglev-ir.cc

@@ -182,9 +182,8 @@ bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const {
return RootToBoolean(index_);
}
bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) {
bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node) {
DCHECK(IsConstantNode(node->opcode()));
LocalIsolate* local_isolate = masm->isolate()->AsLocalIsolate();
switch (node->opcode()) {
#define CASE(Name) \
case Opcode::k##Name: { \
@@ -197,6 +196,14 @@ bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) {
}
}
bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) {
// TODO(leszeks): Getting the main thread local isolate is not what we
// actually want here, but it's all we have, and it happens to work because
// really all we're using it for is ReadOnlyRoots. We should change ToBoolean
// to be able to pass ReadOnlyRoots in directly.
return FromConstantToBool(masm->isolate()->AsLocalIsolate(), node);
}
DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame,
compiler::FeedbackSource feedback_to_update)
: top_frame_(top_frame),
@@ -969,6 +976,185 @@ void LoadTaggedField::GenerateCode(MaglevAssembler* masm,
__ DecompressTagged(ToRegister(result()), FieldMemOperand(object, offset()));
}
void LoadTaggedFieldByFieldIndex::SetValueLocationConstraints() {
UseRegister(object_input());
UseAndClobberRegister(index_input());
DefineAsRegister(this);
set_temporaries_needed(1);
set_double_temporaries_needed(1);
}
void LoadTaggedFieldByFieldIndex::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
Register object = ToRegister(object_input());
Register index = ToRegister(index_input());
Register result_reg = ToRegister(result());
__ AssertNotSmi(object);
__ AssertSmi(index);
ZoneLabelRef done(masm);
// For in-object properties, the index is encoded as:
//
// index = actual_index | is_double_bit | smi_tag_bit
// = actual_index << 2 | is_double_bit << 1
//
// The value we want is at the field offset:
//
// (actual_index << kTaggedSizeLog2) + JSObject::kHeaderSize
//
// We could get index from actual_index by shifting away the double and smi
// bits. But, note that `kTaggedSizeLog2 == 2` and `index` encodes
// `actual_index` with a two bit shift. So, we can do some rearranging
// to get the offset without shifting:
//
// ((index >> 2) << kTaggedSizeLog2) + JSObject::kHeaderSize
//
// [Expand definitions of index and kTaggedSizeLog2]
// = (((actual_index << 2 | is_double_bit << 1) >> 2) << 2)
// + JSObject::kHeaderSize
//
// [Cancel out shift down and shift up, clear is_double bit by subtracting]
// = (actual_index << 2 | is_double_bit << 1) - (is_double_bit << 1)
// + JSObject::kHeaderSize
//
// [Fold together the constants, and collapse definition of index]
// = index + (JSObject::kHeaderSize - (is_double_bit << 1))
//
//
// For out-of-object properties, the encoding is:
//
// index = (-1 - actual_index) | is_double_bit | smi_tag_bit
// = (-1 - actual_index) << 2 | is_double_bit << 1
// = (-1 - actual_index) * 4 + (is_double_bit ? 2 : 0)
// = -(actual_index * 4) + (is_double_bit ? 2 : 0) - 4
// = -(actual_index << 2) + (is_double_bit ? 2 : 0) - 4
//
// The value we want is in the property array at offset:
//
// (actual_index << kTaggedSizeLog2) + FixedArray::kHeaderSize
//
// [Expand definition of kTaggedSizeLog2]
// = (actual_index << 2) + FixedArray::kHeaderSize
//
// [Substitute in index]
// = (-index + (is_double_bit ? 2 : 0) - 4) + FixedArray::kHeaderSize
//
// [Fold together the constants]
// = -index + (FixedArray::kHeaderSize + (is_double_bit ? 2 : 0) - 4)
//
// This allows us to simply negate the index register and do a load with
// otherwise constant offset.
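// A worked example (illustrative, assuming pointer compression, i.e.
// kTaggedSize == 4 and 32-bit smi payloads):
//
//   In-object tagged field, actual_index = 3:
//     index = 3 << 2 = 12
//     load at object + 12 + (JSObject::kHeaderSize - 0)  -> field #3
//
//   Out-of-object tagged field, actual_index = 0:
//     index = (-1 - 0) << 2 = -4, negated to 4
//     load at property_array + 4 + (FixedArray::kHeaderSize + 0 - 4)
//       -> element #0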
// Check if field is a mutable double field.
static constexpr int32_t kIsDoubleBitMask = 1 << kSmiTagSize;
__ TestInt32AndJumpIfAnySet(
index, kIsDoubleBitMask,
__ MakeDeferredCode(
[](MaglevAssembler* masm, Register object, Register index,
Register result_reg, RegisterSnapshot register_snapshot,
ZoneLabelRef done) {
// The field is a Double field, a.k.a. a mutable HeapNumber.
static const int kIsDoubleBit = 1;
// Check if field is in-object or out-of-object. The is_double bit
// value doesn't matter, since negative values will stay negative.
Label if_outofobject, loaded_field;
__ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject);
// The field is located in the {object} itself.
{
// See giant comment above.
static_assert(kTaggedSizeLog2 == 2);
static_assert(kSmiTagSize == 1);
// We haven't untagged, so we need to sign extend.
__ SignExtend32To64Bits(index, index);
__ LoadTaggedFieldByIndex(
result_reg, object, index, 1,
JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize));
__ Jump(&loaded_field);
}
__ bind(&if_outofobject);
{
MaglevAssembler::ScratchRegisterScope temps(masm);
Register property_array = temps.Acquire();
// Load the property array.
__ LoadTaggedField(
property_array,
FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
// See giant comment above.
static_assert(kSmiTagSize == 1);
__ NegateInt32(index);
__ LoadTaggedFieldByIndex(
result_reg, property_array, index, 1,
FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4);
__ Jump(&loaded_field);
}
__ bind(&loaded_field);
// We may have transitioned in-place away from double, so check that
// this is a HeapNumber -- otherwise the load is fine and we don't
// need to copy anything anyway.
__ JumpIfSmi(result_reg, *done);
// index is no longer needed and is clobbered by this node, so
// reuse it as a scratch reg storing the map.
Register map = index;
__ LoadMap(map, result_reg);
__ JumpIfNotRoot(map, RootIndex::kHeapNumberMap, *done);
MaglevAssembler::ScratchRegisterScope temps(masm);
DoubleRegister double_value = temps.AcquireDouble();
__ LoadHeapNumberValue(double_value, result_reg);
__ AllocateHeapNumber(register_snapshot, result_reg, double_value);
__ Jump(*done);
},
object, index, result_reg, register_snapshot(), done));
// The field is a proper Tagged field on {object}. The {index} is still
// smi-tagged, but with the is_double bit clear its low two bits are zero,
// so it can be used directly as a byte offset (see the comment above).
{
static const int kIsDoubleBit = 0;
// Check if field is in-object or out-of-object. The is_double bit value
// doesn't matter, since negative values will stay negative.
Label if_outofobject;
__ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject);
// The field is located in the {object} itself.
{
// See giant comment above.
static_assert(kTaggedSizeLog2 == 2);
static_assert(kSmiTagSize == 1);
// We haven't untagged, so we need to sign extend.
__ SignExtend32To64Bits(index, index);
__ LoadTaggedFieldByIndex(
result_reg, object, index, 1,
JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize));
__ Jump(*done);
}
__ bind(&if_outofobject);
{
MaglevAssembler::ScratchRegisterScope temps(masm);
Register property_array = temps.Acquire();
// Load the property array.
__ LoadTaggedField(
property_array,
FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
// See giant comment above.
static_assert(kSmiTagSize == 1);
__ NegateInt32(index);
__ LoadTaggedFieldByIndex(
result_reg, property_array, index, 1,
FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4);
// Fallthrough to `done`.
}
}
__ bind(*done);
}
namespace {
template <typename NodeT, typename Function, typename... Args>

src/maglev/maglev-ir.h

@@ -165,6 +165,7 @@ class CompactInterpreterFrameState;
V(LoadPolymorphicTaggedField) \
V(LoadTaggedField) \
V(LoadDoubleField) \
V(LoadTaggedFieldByFieldIndex) \
V(LoadFixedArrayElement) \
V(LoadFixedDoubleArrayElement) \
V(LoadSignedIntDataViewElement) \
@@ -414,6 +415,7 @@ enum class ValueRepresentation : uint8_t {
constexpr Condition ConditionFor(Operation cond);
bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node);
bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node);
inline int ExternalArrayElementSize(const ExternalArrayType element_type) {
@@ -4298,6 +4300,29 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> {
const int offset_;
};
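// Loads a field off an object given a smi-encoded FieldIndex, as produced by
// the enum cache's indices array. Built in the graph builder as, e.g.:
//   AddNewNode<LoadTaggedFieldByFieldIndex>({object, field_index});
// (see MaglevGraphBuilder::VisitGetKeyedProperty).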
class LoadTaggedFieldByFieldIndex
: public FixedInputValueNodeT<2, LoadTaggedFieldByFieldIndex> {
using Base = FixedInputValueNodeT<2, LoadTaggedFieldByFieldIndex>;
public:
explicit LoadTaggedFieldByFieldIndex(uint64_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties =
OpProperties::Reading() | OpProperties::DeferredCall();
static constexpr typename Base::InputTypes kInputTypes{
ValueRepresentation::kTagged, ValueRepresentation::kTagged};
static constexpr int kObjectIndex = 0;
static constexpr int kIndexIndex = 1;
Input& object_input() { return input(kObjectIndex); }
Input& index_input() { return input(kIndexIndex); }
int MaxCallStackArgs() const { return 0; }
void SetValueLocationConstraints();
void GenerateCode(MaglevAssembler*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class LoadFixedArrayElement
: public FixedInputValueNodeT<2, LoadFixedArrayElement> {
using Base = FixedInputValueNodeT<2, LoadFixedArrayElement>;

src/maglev/x64/maglev-assembler-x64-inl.h

@@ -38,6 +38,19 @@ constexpr Condition ConditionFor(Operation operation) {
}
}
inline ScaleFactor ScaleFactorFromInt(int n) {
switch (n) {
case 1:
return times_1;
case 2:
return times_2;
case 4:
return times_4;
default:
UNREACHABLE();
}
}
class MaglevAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(MaglevAssembler* masm)
@@ -223,6 +236,14 @@ inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer,
addq(data_pointer, base);
}
inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result,
Register object,
Register index, int scale,
int offset) {
LoadTaggedField(
result, FieldOperand(object, index, ScaleFactorFromInt(scale), offset));
}
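// Unlike arm64, x64 folds the scaled index into a single addressing mode;
// e.g. (sketch) FieldOperand(object, index, times_4, offset) addresses
// object + index * 4 + offset - kHeapObjectTag.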
inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result,
Register object,
int offset) {
@@ -361,6 +382,7 @@ inline void MaglevAssembler::Move(Register dst, Handle<HeapObject> obj) {
inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) {
movsxlq(dst, src);
}
inline void MaglevAssembler::NegateInt32(Register val) { negl(val); }
template <typename NodeT>
inline void MaglevAssembler::DeoptIfBufferDetached(Register array,
@@ -468,6 +490,26 @@ void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2,
JumpIf(cond, target, distance);
}
inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value,
Condition cond,
Label* target,
Label::Distance distance) {
CompareInt32(r1, value);
JumpIf(cond, target, distance);
}
inline void MaglevAssembler::TestInt32AndJumpIfAnySet(
Register r1, int32_t mask, Label* target, Label::Distance distance) {
testl(r1, Immediate(mask));
JumpIf(kNotZero, target, distance);
}
inline void MaglevAssembler::TestInt32AndJumpIfAllClear(
Register r1, int32_t mask, Label* target, Label::Distance distance) {
testl(r1, Immediate(mask));
JumpIf(kZero, target, distance);
}
inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result,
Register heap_number) {
Movsd(result, FieldOperand(heap_number, HeapNumber::kValueOffset));

src/maglev/x64/maglev-ir-x64.cc

@@ -977,19 +977,6 @@ void StoreDoubleDataViewElement::GenerateCode(MaglevAssembler* masm,
namespace {
ScaleFactor ScaleFactorFromInt(int n) {
switch (n) {
case 1:
return times_1;
case 2:
return times_2;
case 4:
return times_4;
default:
UNREACHABLE();
}
}
template <bool check_detached, typename ResultReg, typename NodeT>
void GenerateTypedArrayLoad(MaglevAssembler* masm, NodeT* node, Register object,
Register index, ResultReg result_reg,