[compiler] RawFastPropertyAt without serialization
This is a step towards making JSObjectRef non-serialized. Change
JSObjectRef::RawFastPropertyAt to use a direct load with relaxed semantics.
Special handling of `uninitialized` sentinel values is moved to the only
use-site. A new lock `boilerplate_migration_access` protects against
concurrent boilerplate migrations while we are iterating over properties.

Bug: v8:7790
Change-Id: Ic9de54ca16c1f3364d497a77058cfa33d48dd4a4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2928184
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75033}
This commit is contained in: parent 75d9e3b83e · commit 9bfd401ef5
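For orientation before the diff: the CL folds the map-updater-specific guard into a generic depth-counting guard that both the map-updater lock and the new boilerplate-migration lock reuse. Below is a minimal standalone sketch of that pattern (plain C++ with std::shared_mutex, not the V8 base::SharedMutex classes shown further down): only the outermost guard on a thread physically takes the shared lock; nested guards just adjust a counter.

#include <cassert>
#include <shared_mutex>

class RecursiveSharedGuardSketch {
 public:
  RecursiveSharedGuardSketch(std::shared_mutex* mutex, int* depth)
      : mutex_(mutex), depth_(depth), locked_(*depth == 0) {
    if (locked_) mutex_->lock_shared();  // outermost guard locks for real
    ++*depth_;                           // nested guards only bump the depth
  }
  ~RecursiveSharedGuardSketch() {
    --*depth_;
    assert(*depth_ >= 0);
    if (locked_) mutex_->unlock_shared();
  }

 private:
  std::shared_mutex* const mutex_;
  int* const depth_;
  const bool locked_;
};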
@@ -719,8 +719,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
     Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
   CHECK(name->IsUniqueName());
 
-  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
-      broker(), isolate()->map_updater_access());
+  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker());
 
   if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
     return Invalid();
@@ -802,12 +802,15 @@ class HeapNumberData : public HeapObjectData {
                  Handle<HeapNumber> object,
                  ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
       : HeapObjectData(broker, storage, object, kind),
-        value_(object->value()) {}
+        value_(object->value()),
+        value_as_bits_(object->value_as_bits()) {}
 
   double value() const { return value_; }
+  uint64_t value_as_bits() const { return value_as_bits_; }
 
  private:
   double const value_;
+  uint64_t const value_as_bits_;
 };
 
 class ContextData : public HeapObjectData {
@@ -1412,8 +1415,7 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
   // while the lock is held the Map object may not be modified (except in
   // benign ways).
   // TODO(jgruber): Consider removing this lock by being smrt.
-  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
-      broker, broker->isolate()->map_updater_access());
+  JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker);
 
   // When background serializing the map, we can perform a lite serialization
   // since the MapRef will read some of the Map's fields can be read directly.
@@ -2466,17 +2468,6 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
     DCHECK_EQ(field_index.property_index(),
               static_cast<int>(inobject_fields_.size()));
     Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
-    // In case of double fields we use a sentinel NaN value to mark
-    // uninitialized fields. A boilerplate value with such a field may migrate
-    // from its double to a tagged representation. The sentinel value carries
-    // no special meaning when it occurs in a heap number, so we would like to
-    // recover the uninitialized value. We check for the sentinel here,
-    // specifically, since migrations might have been triggered as part of
-    // boilerplate serialization.
-    if (!details.representation().IsDouble() && value->IsHeapNumber() &&
-        HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
-      value = isolate->factory()->uninitialized_value();
-    }
     ObjectData* value_data = broker->GetOrCreateData(value);
     if (value_data->IsJSObject() && !value_data->should_access_heap()) {
       value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
@@ -3026,10 +3017,12 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
       data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
 }
 
-ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
+base::Optional<ObjectRef> JSObjectRef::RawFastPropertyAt(
+    FieldIndex index) const {
   CHECK(index.is_inobject());
-  if (data_->should_access_heap()) {
-    return MakeRef(broker(), object()->RawFastPropertyAt(index));
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+    return TryMakeRef(broker(),
+                      object()->RawFastPropertyAt(index, kRelaxedLoad));
   }
   JSObjectData* object_data = data()->AsJSObject();
   return ObjectRef(broker(),
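Caller-side sketch of the new contract (a hypothetical caller, mirroring the actual update in the TryAllocateFastLiteral hunks further down): since the ref is now created via TryMakeRef on a possibly racing heap value, ref creation can fail on the background thread and callers must be prepared to bail out of the optimization.

  base::Optional<ObjectRef> maybe_value = boilerplate.RawFastPropertyAt(index);
  if (!maybe_value.has_value()) return {};  // give up on this optimization
  ObjectRef value = maybe_value.value();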
@@ -3306,6 +3299,7 @@ BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
 BIMODAL_ACCESSOR(HeapObject, Map, map)
 
 BIMODAL_ACCESSOR_C(HeapNumber, double, value)
+BIMODAL_ACCESSOR_C(HeapNumber, uint64_t, value_as_bits)
 
 // These JSBoundFunction fields are immutable after initialization. Moreover,
 // as long as JSObjects are still serialized on the main thread, all
@@ -4053,6 +4047,10 @@ base::Optional<ObjectRef> SourceTextModuleRef::import_meta() const {
       data()->AsSourceTextModule()->GetImportMeta(broker()));
 }
 
+base::Optional<MapRef> HeapObjectRef::map_direct_read() const {
+  return TryMakeRef(broker(), object()->map(kAcquireLoad), kAssumeMemoryFence);
+}
+
 namespace {
 
 OddballType GetOddballType(Isolate* isolate, Map map) {
@@ -279,6 +279,10 @@ class HeapObjectRef : public ObjectRef {
 
   MapRef map() const;
 
+  // Only for use in special situations where we need to read the object's
+  // current map (instead of returning the cached map). Use with care.
+  base::Optional<MapRef> map_direct_read() const;
+
   // See the comment on the HeapObjectType class.
   HeapObjectType GetHeapObjectType() const;
 };
@@ -315,7 +319,11 @@ class JSObjectRef : public JSReceiverRef {
 
   Handle<JSObject> object() const;
 
-  ObjectRef RawFastPropertyAt(FieldIndex index) const;
+  // Usable only for in-object properties. Only use this if the underlying
+  // value can be an uninitialized-sentinel, or if HeapNumber construction must
+  // be avoided for some reason. Otherwise, use the higher-level
+  // GetOwnFastDataProperty.
+  base::Optional<ObjectRef> RawFastPropertyAt(FieldIndex index) const;
 
   // Return the element at key {index} if {index} is known to be an own data
   // property of the object that is non-writable and non-configurable.
@@ -439,6 +447,7 @@ class HeapNumberRef : public HeapObjectRef {
   Handle<HeapNumber> object() const;
 
   double value() const;
+  uint64_t value_as_bits() const;
 };
 
 class ContextRef : public HeapObjectRef {
@@ -1656,8 +1656,21 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
 base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
     Node* effect, Node* control, JSObjectRef boilerplate,
     AllocationType allocation) {
-  // Compute the in-object properties to store first (might have effects).
+  // Prevent concurrent migrations of boilerplate objects.
+  JSHeapBroker::BoilerplateMigrationGuardIfNeeded boilerplate_access_guard(
+      broker());
+
+  // Now that we hold the migration lock, get the current map.
   MapRef boilerplate_map = boilerplate.map();
+  base::Optional<MapRef> current_boilerplate_map =
+      boilerplate.map_direct_read();
+
+  if (!current_boilerplate_map.has_value() ||
+      !current_boilerplate_map.value().equals(boilerplate_map)) {
+    return {};
+  }
+
+  // Compute the in-object properties to store first (might have effects).
   ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
   inobject_fields.reserve(boilerplate_map.GetInObjectProperties());
   int const boilerplate_nof = boilerplate_map.NumberOfOwnDescriptors();
@@ -1678,14 +1691,48 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
                           kFullWriteBarrier,
                           LoadSensitivity::kUnsafe,
                           const_field_info};
-    ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
-    bool is_uninitialized =
-        boilerplate_value.IsHeapObject() &&
+
+    // Note: the use of RawFastPropertyAt (vs. the higher-level
+    // GetOwnFastDataProperty) here is necessary, since the underlying value
+    // may be `uninitialized`, which the latter explicitly does not support.
+    base::Optional<ObjectRef> maybe_boilerplate_value =
+        boilerplate.RawFastPropertyAt(index);
+    if (!maybe_boilerplate_value.has_value()) return {};
+
+    // Note: We don't need to take a compilation dependency verifying the value
+    // of `boilerplate_value`, since boilerplate properties are constant after
+    // initialization modulo map migration. We protect against concurrent map
+    // migrations via the boilerplate_migration_access lock.
+    ObjectRef boilerplate_value = maybe_boilerplate_value.value();
+
+    // Uninitialized fields are marked through the `uninitialized_value`, or in
+    // the case of double representation through a HeapNumber containing a
+    // special sentinel value. The boilerplate value is not updated to keep up
+    // with the map, thus conversions may be necessary. Specifically:
+    //
+    // - Smi representation: uninitialized is converted to Smi 0.
+    // - Not double representation: the special heap number sentinel is
+    //   converted to `uninitialized_value`.
+    //
+    // Note that although we create nodes to write `uninitialized_value` into
+    // the object, the field should be overwritten immediately with a real
+    // value, and `uninitialized_value` should never be exposed to JS.
+    bool is_uninitialized = false;
+    if (boilerplate_value.IsHeapObject() &&
         boilerplate_value.AsHeapObject().map().oddball_type() ==
-            OddballType::kUninitialized;
-    if (is_uninitialized) {
+            OddballType::kUninitialized) {
+      is_uninitialized = true;
       access.const_field_info = ConstFieldInfo::None();
+    } else if (!property_details.representation().IsDouble() &&
+               boilerplate_value.IsHeapNumber() &&
+               boilerplate_value.AsHeapNumber().value_as_bits() ==
+                   kHoleNanInt64) {
+      is_uninitialized = true;
+      boilerplate_value =
+          MakeRef<HeapObject>(broker(), factory()->uninitialized_value());
+      access.const_field_info = ConstFieldInfo::None();
     }
+
     Node* value;
     if (boilerplate_value.IsJSObject()) {
       JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
@@ -354,30 +354,44 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
 
   // Locks {mutex} through the duration of this scope iff it is the first
   // occurrence. This is done to have a recursive shared lock on {mutex}.
-  class V8_NODISCARD MapUpdaterGuardIfNeeded final {
-   public:
-    explicit MapUpdaterGuardIfNeeded(JSHeapBroker* ptr,
-                                     base::SharedMutex* mutex)
-        : ptr_(ptr),
-          initial_map_updater_mutex_depth_(ptr->map_updater_mutex_depth_),
-          shared_mutex(mutex, should_lock()) {
-      ptr_->map_updater_mutex_depth_++;
+  class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
+   protected:
+    RecursiveSharedMutexGuardIfNeeded(base::SharedMutex* mutex,
+                                      int* mutex_depth_address)
+        : mutex_depth_address_(mutex_depth_address),
+          initial_mutex_depth_(*mutex_depth_address_),
+          shared_mutex_guard_(mutex, initial_mutex_depth_ == 0) {
+      (*mutex_depth_address_)++;
     }
 
-    ~MapUpdaterGuardIfNeeded() {
-      ptr_->map_updater_mutex_depth_--;
-      DCHECK_EQ(initial_map_updater_mutex_depth_,
-                ptr_->map_updater_mutex_depth_);
+    ~RecursiveSharedMutexGuardIfNeeded() {
+      DCHECK_GE((*mutex_depth_address_), 1);
+      (*mutex_depth_address_)--;
+      DCHECK_EQ(initial_mutex_depth_, (*mutex_depth_address_));
     }
 
-    // Whether the MapUpdater mutex should be physically locked (if not, we
-    // already hold the lock).
-    bool should_lock() const { return initial_map_updater_mutex_depth_ == 0; }
-
    private:
-    JSHeapBroker* const ptr_;
-    const int initial_map_updater_mutex_depth_;
-    base::SharedMutexGuardIf<base::kShared> shared_mutex;
+    int* const mutex_depth_address_;
+    const int initial_mutex_depth_;
+    base::SharedMutexGuardIf<base::kShared> shared_mutex_guard_;
   };
 
+  class MapUpdaterGuardIfNeeded final
+      : public RecursiveSharedMutexGuardIfNeeded {
+   public:
+    explicit MapUpdaterGuardIfNeeded(JSHeapBroker* broker)
+        : RecursiveSharedMutexGuardIfNeeded(
+              broker->isolate()->map_updater_access(),
+              &broker->map_updater_mutex_depth_) {}
+  };
+
+  class BoilerplateMigrationGuardIfNeeded final
+      : public RecursiveSharedMutexGuardIfNeeded {
+   public:
+    explicit BoilerplateMigrationGuardIfNeeded(JSHeapBroker* broker)
+        : RecursiveSharedMutexGuardIfNeeded(
+              broker->isolate()->boilerplate_migration_access(),
+              &broker->boilerplate_migration_mutex_depth_) {}
+  };
+
  private:
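A brief usage note on the refactored guards (a sketch, assuming a JSHeapBroker* broker is in scope): both subclasses can be nested freely on the same broker, and only the outermost instance physically locks the underlying base::SharedMutex.

  {
    JSHeapBroker::MapUpdaterGuardIfNeeded outer(broker);
    // A nested guard only increments map_updater_mutex_depth_; no second lock.
    JSHeapBroker::MapUpdaterGuardIfNeeded inner(broker);
  }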
@@ -502,6 +516,8 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   // holds the locking depth, i.e. how many times the mutex has been
   // recursively locked. Only the outermost locker actually locks underneath.
   int map_updater_mutex_depth_ = 0;
+  // Likewise for boilerplate migrations.
+  int boilerplate_migration_mutex_depth_ = 0;
 
   static constexpr size_t kMaxSerializedFunctionsCacheSize = 200;
   static constexpr uint32_t kMinimalRefsBucketCount = 8;
@@ -672,8 +672,21 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
     return &shared_function_info_access_;
   }
 
+  // Protects (most) map update operations, see also MapUpdater.
   base::SharedMutex* map_updater_access() { return &map_updater_access_; }
 
+  // Protects JSObject boilerplate migrations (i.e. calls to MigrateInstance on
+  // boilerplate objects; elements kind transitions are *not* protected).
+  // Note this lock interacts with `map_updater_access` as follows:
+  //
+  // - boilerplate migrations may trigger map updates.
+  // - if so, `boilerplate_migration_access` is locked before
+  //   `map_updater_access`.
+  // - background threads must use the same lock order to avoid deadlocks.
+  base::SharedMutex* boilerplate_migration_access() {
+    return &boilerplate_migration_access_;
+  }
+
   // The isolate's string table.
   StringTable* string_table() const { return string_table_.get(); }
 
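The lock-ordering rule documented above, as a hedged sketch of a background-thread helper (the helper itself is hypothetical; the guard type and accessors are the ones shown in this diff): always take boilerplate_migration_access before map_updater_access.

  void ReadBoilerplateUnderLocks(Isolate* isolate) {
    // 1. Block concurrent boilerplate migrations first ...
    base::SharedMutexGuard<base::kShared> boilerplate_guard(
        isolate->boilerplate_migration_access());
    // 2. ... then, if needed, the map updater lock, matching the order used
    //    when a migration itself triggers a map update.
    base::SharedMutexGuard<base::kShared> map_updater_guard(
        isolate->map_updater_access());
    // ... safely read boilerplate properties here ...
  }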
@@ -1936,6 +1949,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   base::SharedMutex full_transition_array_access_;
   base::SharedMutex shared_function_info_access_;
   base::SharedMutex map_updater_access_;
+  base::SharedMutex boilerplate_migration_access_;
   Logger* logger_ = nullptr;
   StubCache* load_stub_cache_ = nullptr;
   StubCache* store_stub_cache_ = nullptr;
@@ -342,6 +342,12 @@ Object JSObject::RawFastPropertyAt(PtrComprCageBase cage_base,
   }
 }
 
+Object JSObject::RawFastPropertyAt(FieldIndex index, RelaxedLoadTag tag) const {
+  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+  CHECK(index.is_inobject());
+  return TaggedField<Object>::Relaxed_Load(cage_base, *this, index.offset());
+}
+
 void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
                                             WriteBarrierMode mode) {
   DCHECK(index.is_inobject());
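For readers unfamiliar with RelaxedLoadTag: a rough standalone analogue of what the new accessor does (plain std::atomic, not V8's TaggedField machinery) is an atomic read with no ordering guarantees beyond atomicity, which is sufficient here because the compiler re-validates consistency under the boilerplate migration lock.

  #include <atomic>
  #include <cstdint>

  // Reads a word-sized slot that may be written concurrently; the value is
  // well-defined (no torn read), but no happens-before ordering is implied.
  uintptr_t RelaxedLoadSlot(const std::atomic<uintptr_t>* slot) {
    return slot->load(std::memory_order_relaxed);
  }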
@@ -651,6 +651,9 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
   inline Object RawFastPropertyAt(FieldIndex index) const;
   inline Object RawFastPropertyAt(PtrComprCageBase cage_base,
                                   FieldIndex index) const;
+  // A specialized version of the above for use by TurboFan. Only supports
+  // in-object properties.
+  inline Object RawFastPropertyAt(FieldIndex index, RelaxedLoadTag) const;
 
   inline void FastPropertyAtPut(FieldIndex index, Object value,
                                 WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -84,6 +84,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
   }
 
   if (object->map(isolate).is_deprecated()) {
+    base::SharedMutexGuard<base::kExclusive> mutex_guard(
+        isolate->boilerplate_migration_access());
     JSObject::MigrateInstance(isolate, object);
   }
 