[dict-proto] C++ implementation of SwissNameDictionary, pt. 7
This CL is part of a series that adds the C++ implementation of
SwissNameDictionary, a deterministic property backing store based on
Swiss Tables. This CL adds an in-place version of Rehash and a small
change to HeapObject::HeapObjectShortPrint.

Bug: v8:11388
Change-Id: I578ef8f5638a6201111602ffef7f2d4a5a257bcd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2720305
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Marja Hölttä <marja@chromium.org>
Commit-Queue: Frank Emrich <emrich@google.com>
Cr-Commit-Position: refs/heads/master@{#73099}
Parent: ea5ba5dbb0
Commit: 342f66210a
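The in-place Rehash added below follows a copy-out-and-reinsert pattern: snapshot all live entries in enumeration order, reinitialize the table at the same capacity, then re-add the entries so enumeration order is preserved. The following standalone C++ sketch illustrates that pattern on a hypothetical insertion-order-preserving table; it is not V8 code, and every name in it is made up for illustration.

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Hypothetical, minimal insertion-order-preserving table used only to
// illustrate the copy-out-and-reinsert rehash pattern.
class OrderPreservingTable {
 public:
  void Add(const std::string& key, int value) {
    if (index_.count(key) == 0) order_.push_back(key);
    index_[key] = value;
  }

  void Remove(const std::string& key) {
    // Leaves a stale key behind in order_, analogous to a deleted bucket.
    index_.erase(key);
  }

  // Rebuild in place: collect live entries in enumeration order, reset the
  // table, then re-add them. SwissNameDictionary::Rehash(Isolate*) below has
  // the same overall shape (std::vector<Entry> snapshot, Initialize,
  // AddInternal), just operating on the real backing store.
  void RehashInPlace() {
    std::vector<std::pair<std::string, int>> live;
    live.reserve(index_.size());
    for (const std::string& key : order_) {
      auto it = index_.find(key);
      if (it == index_.end()) continue;  // skip deleted entries
      live.emplace_back(it->first, it->second);
    }
    index_.clear();
    order_.clear();
    for (const auto& e : live) Add(e.first, e.second);
  }

 private:
  std::unordered_map<std::string, int> index_;
  std::vector<std::string> order_;  // enumeration order, may hold stale keys
};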
@@ -1960,6 +1960,10 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) {  // NOLINT
     case NAME_DICTIONARY_TYPE:
       os << "<NameDictionary[" << FixedArray::cast(*this).length() << "]>";
       break;
+    case SWISS_NAME_DICTIONARY_TYPE:
+      os << "<SwissNameDictionary["
+         << SwissNameDictionary::cast(*this).Capacity() << "]>";
+      break;
     case GLOBAL_DICTIONARY_TYPE:
       os << "<GlobalDictionary[" << FixedArray::cast(*this).length() << "]>";
       break;
@@ -2351,6 +2355,7 @@ bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
     case SMALL_ORDERED_HASH_MAP_TYPE:
     case SMALL_ORDERED_HASH_SET_TYPE:
     case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+    case SWISS_NAME_DICTIONARY_TYPE:
     case JS_MAP_TYPE:
     case JS_SET_TYPE:
       return true;
@@ -2374,6 +2379,7 @@ bool HeapObject::CanBeRehashed() const {
     case GLOBAL_DICTIONARY_TYPE:
     case NUMBER_DICTIONARY_TYPE:
     case SIMPLE_NUMBER_DICTIONARY_TYPE:
+    case SWISS_NAME_DICTIONARY_TYPE:
       return true;
     case DESCRIPTOR_ARRAY_TYPE:
     case STRONG_DESCRIPTOR_ARRAY_TYPE:
@@ -2399,6 +2405,9 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
     case NAME_DICTIONARY_TYPE:
      NameDictionary::cast(*this).Rehash(isolate);
      break;
+    case SWISS_NAME_DICTIONARY_TYPE:
+     SwissNameDictionary::cast(*this).Rehash(isolate);
+     break;
     case GLOBAL_DICTIONARY_TYPE:
      GlobalDictionary::cast(*this).Rehash(isolate);
      break;
@@ -94,6 +94,60 @@ Handle<SwissNameDictionary> SwissNameDictionary::Shrink(
   return Rehash(isolate, table, new_capacity);
 }
 
+// TODO(v8:11388) Copying all data into a std::vector and then re-adding into
+// the table doesn't seem like a good algorithm. Abseil's Swiss Tables come
+// with a clever algorithm for re-hashing in place: It first changes the
+// control table, effectively changing the roles of full, empty and deleted
+// buckets. It then moves each entry to its new bucket by swapping entries (see
+// drop_deletes_without_resize in Abseil's raw_hash_set.h). This algorithm
+// could generally be adapted to work on our insertion-order-preserving
+// implementation, too. However, it would require a mapping from hash table
+// buckets back to enumeration indices. This could either be created in this
+// function (requiring a vector with Capacity() entries and a separate pass
+// over the enumeration table) or by creating this backwards mapping ahead of
+// time and storing it somewhere in the main table or the meta table, for
+// those SwissNameDictionaries that we know will be in-place rehashed, most
+// notably those stored in the snapshot.
+void SwissNameDictionary::Rehash(Isolate* isolate) {
+  DisallowHeapAllocation no_gc;
+
+  struct Entry {
+    Name key;
+    Object value;
+    PropertyDetails details;
+  };
+
+  if (Capacity() == 0) return;
+
+  Entry dummy{Name(), Object(), PropertyDetails::Empty()};
+  std::vector<Entry> data(NumberOfElements(), dummy);
+
+  ReadOnlyRoots roots(isolate);
+  int data_index = 0;
+  for (int enum_index = 0; enum_index < UsedCapacity(); ++enum_index) {
+    int entry = EntryForEnumerationIndex(enum_index);
+    Object key;
+    if (!ToKey(roots, entry, &key)) continue;
+
+    data[data_index++] =
+        Entry{Name::cast(key), ValueAtRaw(entry), DetailsAt(entry)};
+  }
+
+  Initialize(isolate, meta_table(), Capacity());
+
+  int new_enum_index = 0;
+  SetNumberOfElements(static_cast<int>(data.size()));
+  for (Entry& e : data) {
+    int new_entry = AddInternal(e.key, e.value, e.details);
+
+    // TODO(v8:11388) Investigate ways of hoisting the branching needed to
+    // select the correct meta table entry size (based on the capacity of the
+    // table) out of the loop.
+    SetEntryForEnumerationIndex(new_enum_index, new_entry);
+    ++new_enum_index;
+  }
+}
+
 // The largest value we ever have to store in the enumeration table is
 // Capacity() - 1. The largest value we ever have to store for the present or
 // deleted element count is MaxUsableCapacity(Capacity()). All data in the
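For reference, the alternative mentioned in the TODO above (Abseil's drop_deletes_without_resize) starts by reinterpreting the control table before any element is moved. The snippet below is a hedged, simplified sketch of that first step only, with made-up names; it is neither Abseil's nor V8's actual code. After this pass, a slot still marked kDeleted holds an element that has yet to be placed, so elements can then be relocated by probing to their new bucket and swapping in place, without allocating a second table. As the TODO notes, adapting this to SwissNameDictionary would additionally require a mapping from buckets back to enumeration indices.

#include <cstdint>
#include <vector>

// Simplified control-byte states of a Swiss-Table-like control table.
enum class Ctrl : int8_t { kEmpty, kDeleted, kFull };

// First step of an in-place rehash in the style of Abseil's
// drop_deletes_without_resize: deleted slots become reusable (kEmpty) and
// full slots are re-marked kDeleted, here meaning "occupied, but not yet
// moved to its new bucket".
void ConvertDeletedToEmptyAndFullToDeleted(std::vector<Ctrl>& ctrl) {
  for (Ctrl& c : ctrl) {
    c = (c == Ctrl::kFull) ? Ctrl::kDeleted : Ctrl::kEmpty;
  }
}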
@@ -119,6 +119,7 @@ class SwissNameDictionary : public HeapObject {
   static Handle<SwissNameDictionary> Rehash(LocalIsolate* isolate,
                                             Handle<SwissNameDictionary> table,
                                             int new_capacity);
+  void Rehash(Isolate* isolate);
 
   inline void SetHash(int hash);
   inline int Hash();