Reland "[maglev] Optimize monomorphic keyed loads"
This is a reland of commit 133e7f8362
Reland: Rebase onto v8_multi_arch_build fix.
Original change's description:
> [maglev] Optimize monomorphic keyed loads
>
> Add a fast path for keyed loads that are:
>
> 1. Monomorphic,
> 2. Fast elements accesses,
> 3. Not out-of-bounds (deopt on OOB),
> 4. Not holey
>
> Bug: v8:7700
> Change-Id: I4d46f4d0ce7065c93a9b092833fb16a8c9e9f94e
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882974
> Auto-Submit: Leszek Swirski <leszeks@chromium.org>
> Reviewed-by: Jakob Linke <jgruber@chromium.org>
> Commit-Queue: Leszek Swirski <leszeks@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83149}
Bug: v8:7700
Change-Id: Ib48bdc8729757527c19d0b24864f8eab0570c3f3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890920
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83156}
parent a9327e9394
commit 3501fca7e5
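To illustrate the kind of access this change targets (an illustrative example, not part of the original commit message): a keyed load whose feedback has seen a single receiver map with packed, fast elements and only in-bounds indices can now be compiled by Maglev as a direct elements load, deopting on out-of-bounds instead of falling back to the generic keyed-load IC.

function sum(arr) {
  let total = 0;
  for (let i = 0; i < arr.length; i++) {
    // Monomorphic, in-bounds keyed load over packed fast elements.
    total += arr[i];
  }
  return total;
}
sum([1, 2, 3, 4]);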
@@ -1265,7 +1265,6 @@ void FeedbackNexus::Print(std::ostream& os) {
    case FeedbackSlotKind::kDefineKeyedOwn:
    case FeedbackSlotKind::kHasKeyed:
    case FeedbackSlotKind::kInstanceOf:
    case FeedbackSlotKind::kLoadKeyed:
    case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral:
    case FeedbackSlotKind::kStoreGlobalSloppy:
    case FeedbackSlotKind::kStoreGlobalStrict:
@@ -1291,6 +1290,7 @@ void FeedbackNexus::Print(std::ostream& os) {
      }
      break;
    }
    case FeedbackSlotKind::kLoadKeyed:
    case FeedbackSlotKind::kLoadProperty: {
      os << InlineCacheState2String(ic_state());
      if (ic_state() == InlineCacheState::MONOMORPHIC) {
@@ -22,6 +22,7 @@
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
#include "src/objects/elements-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/name-inl.h"
@@ -1133,6 +1134,60 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler(
  return true;
}

bool MaglevGraphBuilder::TryBuildMonomorphicElementLoad(
    ValueNode* object, ValueNode* index, const compiler::MapRef& map,
    MaybeObjectHandle handler) {
  if (handler.is_null()) return false;

  if (handler->IsSmi()) {
    return TryBuildMonomorphicElementLoadFromSmiHandler(
        object, index, map, handler->ToSmi().value());
  }
  return false;
}

bool MaglevGraphBuilder::TryBuildMonomorphicElementLoadFromSmiHandler(
    ValueNode* object, ValueNode* index, const compiler::MapRef& map,
    int32_t handler) {
  LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler);

  switch (kind) {
    case LoadHandler::Kind::kElement: {
      if (LoadHandler::AllowOutOfBoundsBits::decode(handler)) {
        return false;
      }
      ElementsKind elements_kind =
          LoadHandler::ElementsKindBits::decode(handler);
      if (!IsFastElementsKind(elements_kind)) return false;

      // TODO(leszeks): Handle holey elements.
      if (IsHoleyElementsKind(elements_kind)) return false;
      DCHECK(!LoadHandler::ConvertHoleBits::decode(handler));

      BuildMapCheck(object, map);
      BuildCheckSmi(index);

      if (LoadHandler::IsJsArrayBits::decode(handler)) {
        DCHECK(map.IsJSArrayMap());
        AddNewNode<CheckJSArrayBounds>({object, index});
      } else {
        DCHECK(!map.IsJSArrayMap());
        DCHECK(map.IsJSObjectMap());
        AddNewNode<CheckJSObjectElementsBounds>({object, index});
      }
      if (elements_kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
        SetAccumulator(AddNewNode<LoadDoubleElement>({object, index}));
      } else {
        DCHECK(!IsDoubleElementsKind(elements_kind));
        SetAccumulator(AddNewNode<LoadTaggedElement>({object, index}));
      }
      return true;
    }
    default:
      return false;
  }
}

void MaglevGraphBuilder::VisitGetNamedProperty() {
  // GetNamedProperty <object> <name_index> <slot>
  ValueNode* object = LoadRegisterTagged(0);
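As a rough sketch of what the fast path above emits (derived from the code, not stated in the commit): for a packed JSArray receiver it builds a map check on the object, a Smi check on the index, a CheckJSArrayBounds node (CheckJSObjectElementsBounds for non-array receivers), and finally a LoadTaggedElement node, or LoadDoubleElement when the feedback says PACKED_DOUBLE_ELEMENTS; any failing check triggers an eager deopt.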
@@ -1226,6 +1281,8 @@ void MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() {
void MaglevGraphBuilder::VisitGetKeyedProperty() {
  // GetKeyedProperty <object> <slot>
  ValueNode* object = LoadRegisterTagged(0);
  // TODO(leszeks): We don't need to tag the key if it's an Int32 and a simple
  // monomorphic element load.
  ValueNode* key = GetAccumulatorTagged();
  FeedbackSlot slot = GetSlotOperand(1);
  compiler::FeedbackSource feedback_source{feedback(), slot};
@@ -1240,6 +1297,22 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
          DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
      return;

    case compiler::ProcessedFeedback::kElementAccess: {
      const compiler::ElementAccessFeedback& element_feedback =
          processed_feedback.AsElementAccess();
      if (element_feedback.transition_groups().size() != 1) break;
      compiler::MapRef map = MakeRefAssumeMemoryFence(
          broker(), element_feedback.transition_groups()[0].front());

      // Monomorphic load, check the handler.
      // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
      MaybeObjectHandle handler =
          FeedbackNexusForSlot(slot).FindHandlerForMap(map.object());

      if (TryBuildMonomorphicElementLoad(object, key, map, handler)) return;
      break;
    }

    default:
      break;
  }
@@ -975,6 +975,14 @@ class MaglevGraphBuilder {
                                              const compiler::MapRef& map,
                                              LoadHandler handler);

  bool TryBuildMonomorphicElementLoad(ValueNode* object, ValueNode* index,
                                      const compiler::MapRef& map,
                                      MaybeObjectHandle handler);
  bool TryBuildMonomorphicElementLoadFromSmiHandler(ValueNode* object,
                                                    ValueNode* index,
                                                    const compiler::MapRef& map,
                                                    int32_t handler);

  bool TryBuildMonomorphicStore(ValueNode* object, const compiler::MapRef& map,
                                MaybeObjectHandle handler);
  bool TryBuildMonomorphicStoreFromSmiHandler(ValueNode* object,
@@ -175,6 +175,10 @@ class MaglevGraphVerifier {
      case Opcode::kGenericLessThan:
      case Opcode::kGenericLessThanOrEqual:
      case Opcode::kGenericStrictEqual:
      case Opcode::kCheckJSArrayBounds:
      case Opcode::kCheckJSObjectElementsBounds:
      case Opcode::kLoadTaggedElement:
      case Opcode::kLoadDoubleElement:
      case Opcode::kGetIterator:
      case Opcode::kTaggedEqual:
      case Opcode::kTaggedNotEqual:
@@ -28,6 +28,7 @@
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir-inl.h"
#include "src/maglev/maglev-vreg-allocator.h"
#include "src/objects/instance-type.h"

namespace v8 {
namespace internal {
@@ -1374,6 +1375,56 @@ void CheckMapsWithMigration::PrintParams(
  os << "(" << *map().object() << ")";
}

void CheckJSArrayBounds::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  UseRegister(receiver_input());
  UseRegister(index_input());
}
void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm,
                                      const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  __ AssertNotSmi(object);
  __ AssertSmi(index);

  if (FLAG_debug_code) {
    __ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kUnexpectedValue);
  }
  TaggedRegister length(kScratchRegister);
  __ LoadAnyTaggedField(length, FieldOperand(object, JSArray::kLengthOffset));
  __ cmp_tagged(index, length.reg());
  __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}

void CheckJSObjectElementsBounds::AllocateVreg(
    MaglevVregAllocationState* vreg_state) {
  UseRegister(receiver_input());
  UseRegister(index_input());
}
void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
                                               const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  __ AssertNotSmi(object);
  __ AssertSmi(index);

  if (FLAG_debug_code) {
    __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
    __ Assert(greater_equal, AbortReason::kUnexpectedValue);
  }
  __ LoadAnyTaggedField(
      kScratchRegister,
      FieldOperand(object, JSReceiver::kPropertiesOrHashOffset));
  if (FLAG_debug_code) {
    __ AssertNotSmi(kScratchRegister);
  }
  TaggedRegister length(kScratchRegister);
  __ LoadAnyTaggedField(
      length, FieldOperand(kScratchRegister, FixedArray::kLengthOffset));
  __ cmp_tagged(index, length.reg());
  __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}

void CheckedInternalizedString::AllocateVreg(
    MaglevVregAllocationState* vreg_state) {
  UseRegister(object_input());
@@ -1463,6 +1514,86 @@ void LoadDoubleField::PrintParams(std::ostream& os,
  os << "(0x" << std::hex << offset() << std::dec << ")";
}

void LoadTaggedElement::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  UseRegister(object_input());
  UseRegister(index_input());
  DefineAsRegister(vreg_state, this);
}
void LoadTaggedElement::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  Register object = ToRegister(object_input());
  Register index = ToRegister(index_input());
  Register result_reg = ToRegister(result());
  __ AssertNotSmi(object);
  if (FLAG_debug_code) {
    __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
    __ Assert(above_equal, AbortReason::kUnexpectedValue);
  }
  __ DecompressAnyTagged(kScratchRegister,
                         FieldOperand(object, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    __ CmpObjectType(kScratchRegister, FIXED_ARRAY_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kUnexpectedValue);
    // Reload since CmpObjectType clobbered the scratch register.
    __ DecompressAnyTagged(kScratchRegister,
                           FieldOperand(object, JSObject::kElementsOffset));
  }
  __ AssertSmi(index);
  // Zero out top bits of index reg (these were previously either zero already,
  // or the cage base). This technically mutates it, but since it's a Smi, that
  // doesn't matter.
  __ movl(index, index);
  static_assert(kSmiTagSize + kSmiShiftSize < times_tagged_size,
                "Folding the Smi shift into the FixedArray entry size shift "
                "only works if the shift is small");
  __ DecompressAnyTagged(
      result_reg,
      FieldOperand(kScratchRegister, index,
                   static_cast<ScaleFactor>(times_tagged_size -
                                            (kSmiTagSize + kSmiShiftSize)),
                   FixedArray::kHeaderSize));
}

void LoadDoubleElement::AllocateVreg(MaglevVregAllocationState* vreg_state) {
  UseRegister(object_input());
  UseRegister(index_input());
  DefineAsRegister(vreg_state, this);
}
void LoadDoubleElement::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  Register object = ToRegister(object_input());
  Register index = ToRegister(index_input());
  DoubleRegister result_reg = ToDoubleRegister(result());
  __ AssertNotSmi(object);
  if (FLAG_debug_code) {
    __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister);
    __ Assert(above_equal, AbortReason::kUnexpectedValue);
  }
  __ DecompressAnyTagged(kScratchRegister,
                         FieldOperand(object, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    __ CmpObjectType(kScratchRegister, FIXED_DOUBLE_ARRAY_TYPE,
                     kScratchRegister);
    __ Assert(equal, AbortReason::kUnexpectedValue);
    // Reload since CmpObjectType clobbered the scratch register.
    __ DecompressAnyTagged(kScratchRegister,
                           FieldOperand(object, JSObject::kElementsOffset));
  }
  __ AssertSmi(index);
  // Zero out top bits of index reg (these were previously either zero already,
  // or the cage base). This technically mutates it, but since it's a Smi, that
  // doesn't matter.
  __ movl(index, index);
  static_assert(kSmiTagSize + kSmiShiftSize < times_8,
                "Folding the Smi shift into the FixedArray entry size shift "
                "only works if the shift is small");
  __ Movsd(result_reg,
           FieldOperand(kScratchRegister, index,
                        static_cast<ScaleFactor>(times_8 -
                                                 (kSmiTagSize + kSmiShiftSize)),
                        FixedDoubleArray::kHeaderSize));
}

void StoreTaggedFieldNoWriteBarrier::AllocateVreg(
    MaglevVregAllocationState* vreg_state) {
  UseRegister(object_input());
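A worked example of the scale-factor folding above (assuming pointer compression on x64, where kSmiTagSize == 1, kSmiShiftSize == 0, tagged slots are 4 bytes and times_tagged_size == times_4): a Smi index i is stored as i << 1, so addressing it with scale times_2 (= times_4 - 1) yields (i << 1) * 2 = i * 4 bytes, i.e. exactly i tagged elements into the FixedArray; LoadDoubleElement does the same with times_4 (= times_8 - 1) to reach i * 8 bytes into the FixedDoubleArray.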
@@ -144,6 +144,8 @@ class CompactInterpreterFrameState;
  V(InitialValue) \
  V(LoadTaggedField) \
  V(LoadDoubleField) \
  V(LoadTaggedElement) \
  V(LoadDoubleElement) \
  V(LoadGlobal) \
  V(LoadNamedGeneric) \
  V(LoadNamedFromSuperGeneric) \
@@ -192,6 +194,8 @@ class CompactInterpreterFrameState;
  V(CheckSymbol) \
  V(CheckString) \
  V(CheckMapsWithMigration) \
  V(CheckJSArrayBounds) \
  V(CheckJSObjectElementsBounds) \
  V(GeneratorStore) \
  V(JumpLoopPrologue) \
  V(StoreTaggedFieldNoWriteBarrier) \
@@ -2663,6 +2667,39 @@ class CheckMapsWithMigration
  const CheckType check_type_;
};

class CheckJSArrayBounds : public FixedInputNodeT<2, CheckJSArrayBounds> {
  using Base = FixedInputNodeT<2, CheckJSArrayBounds>;

 public:
  explicit CheckJSArrayBounds(uint64_t bitfield) : Base(bitfield) {}

  static constexpr OpProperties kProperties = OpProperties::EagerDeopt();

  static constexpr int kReceiverIndex = 0;
  static constexpr int kIndexIndex = 1;
  Input& receiver_input() { return input(kReceiverIndex); }
  Input& index_input() { return input(kIndexIndex); }

  DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
};

class CheckJSObjectElementsBounds
    : public FixedInputNodeT<2, CheckJSObjectElementsBounds> {
  using Base = FixedInputNodeT<2, CheckJSObjectElementsBounds>;

 public:
  explicit CheckJSObjectElementsBounds(uint64_t bitfield) : Base(bitfield) {}

  static constexpr OpProperties kProperties = OpProperties::EagerDeopt();

  static constexpr int kReceiverIndex = 0;
  static constexpr int kIndexIndex = 1;
  Input& receiver_input() { return input(kReceiverIndex); }
  Input& index_input() { return input(kIndexIndex); }

  DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
};

class CheckedInternalizedString
    : public FixedInputValueNodeT<1, CheckedInternalizedString> {
  using Base = FixedInputValueNodeT<1, CheckedInternalizedString>;
@@ -2758,6 +2795,39 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> {
  const int offset_;
};

class LoadTaggedElement : public FixedInputValueNodeT<2, LoadTaggedElement> {
  using Base = FixedInputValueNodeT<2, LoadTaggedElement>;

 public:
  explicit LoadTaggedElement(uint64_t bitfield) : Base(bitfield) {}

  static constexpr OpProperties kProperties = OpProperties::Reading();

  static constexpr int kObjectIndex = 0;
  static constexpr int kIndexIndex = 1;
  Input& object_input() { return input(kObjectIndex); }
  Input& index_input() { return input(kIndexIndex); }

  DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
};

class LoadDoubleElement : public FixedInputValueNodeT<2, LoadDoubleElement> {
  using Base = FixedInputValueNodeT<2, LoadDoubleElement>;

 public:
  explicit LoadDoubleElement(uint64_t bitfield) : Base(bitfield) {}

  static constexpr OpProperties kProperties =
      OpProperties::Reading() | OpProperties::Float64();

  static constexpr int kObjectIndex = 0;
  static constexpr int kIndexIndex = 1;
  Input& object_input() { return input(kObjectIndex); }
  Input& index_input() { return input(kIndexIndex); }

  DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
};

class StoreTaggedFieldNoWriteBarrier
    : public FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier> {
  using Base = FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier>;