[hashtable] Move data table to the beginning

TBR: hpayer@chromium.org
Bug: v8:6443, v8:7569
Change-Id: Idd952ed0a832c469b76f1cbc919f700e09dc975d
Reviewed-on: https://chromium-review.googlesource.com/1031559
Commit-Queue: Sathya Gunasekaran <gsathya@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52930}
Author: Sathya Gunasekaran
Date: 2018-05-02 17:26:15 -07:00 (committed by Commit Bot)
Parent: a9e2b2ce20
Commit: cfc79faa93
5 changed files with 193 additions and 135 deletions
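
The patch is mechanical, but the intent is easier to see outside the diff: the object used to be laid out as [ Header ] [ HashTable ] [ Chains ] [ Padding ] [ DataTable ], and afterwards the pointer-sized data entries come first at a fixed, aligned offset, with the raw bucket and chain bytes packed behind them. Below is a standalone sketch (not V8 code) that mirrors the new SizeFor() from the diff, assuming 64-bit pointers (kPointerSize = 8), a one-word heap-object header (kHeaderSize = 8), kLoadFactor = 2, and kEntrySize = 1 as in the updated layout comment.

#include <cstdio>

// All constants below are assumptions for illustration only.
constexpr int kPointerSize = 8;                      // 64-bit target
constexpr int kHeaderSize = kPointerSize;            // just the map word
constexpr int kDataTableStartOffset = kPointerSize;  // 3 header bytes + padding
constexpr int kLoadFactor = 2;

constexpr int DataTableSizeFor(int capacity, int entry_size) {
  return capacity * entry_size * kPointerSize;
}

// Mirrors SizeFor() from the new header: data table first, then one byte per
// bucket, one byte per chain link, rounded up to a pointer boundary.
constexpr int SizeFor(int capacity, int entry_size) {
  int data_table_size = DataTableSizeFor(capacity, entry_size);
  int hash_table_size = capacity / kLoadFactor;
  int chain_table_size = capacity;
  int total_size = kHeaderSize + kDataTableStartOffset + data_table_size +
                   hash_table_size + chain_table_size;
  return ((total_size + kPointerSize - 1) / kPointerSize) * kPointerSize;
}

static_assert(SizeFor(4, 1) == 56, "4 one-word entries fit in 56 bytes");

int main() {
  std::printf("SizeFor(capacity=4, entry_size=1) = %d bytes\n", SizeFor(4, 1));
}

Keeping the data table pointer-aligned at a fixed offset is also what lets the GC body descriptor further down treat the entries as a plain run of tagged slots.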


@@ -441,7 +441,7 @@ Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
   CHECK_LE(capacity, SmallOrderedHashSet::kMaxCapacity);
   DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
-  int size = SmallOrderedHashSet::Size(capacity);
+  int size = SmallOrderedHashSet::SizeFor(capacity);
   Map* map = *small_ordered_hash_set_map();
   HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
   Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result),

@@ -456,7 +456,7 @@ Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
   CHECK_LE(capacity, SmallOrderedHashMap::kMaxCapacity);
   DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
-  int size = SmallOrderedHashMap::Size(capacity);
+  int size = SmallOrderedHashMap::SizeFor(capacity);
   Map* map = *small_ordered_hash_map_map();
   HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
   Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result),


@@ -181,7 +181,8 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
  public:
   static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
     Derived* table = reinterpret_cast<Derived*>(obj);
-    if (offset < table->GetDataTableStartOffset()) return false;
+    if (offset < kDataTableStartOffset) return false;
+    if (offset >= table->GetBucketsStartOffset()) return false;
     return IsValidSlotImpl(map, obj, offset);
   }

@@ -189,7 +190,7 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
   static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
                                  ObjectVisitor* v) {
     Derived* table = reinterpret_cast<Derived*>(obj);
-    int start = table->GetDataTableStartOffset();
+    int start = kDataTableStartOffset;
     for (int i = 0; i < table->Capacity(); i++) {
       IteratePointer(obj, start + (i * kPointerSize), v);
     }

@@ -197,7 +198,7 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
   static inline int SizeOf(Map* map, HeapObject* obj) {
     Derived* table = reinterpret_cast<Derived*>(obj);
-    return table->Size();
+    return table->SizeFor(table->Capacity());
   }
 };
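
With the data table at the front, the body descriptor can bound the pointer-bearing region directly: valid tagged slots lie in [kDataTableStartOffset, GetBucketsStartOffset()), and the bucket/chain bytes behind them must never be visited by the GC. A minimal standalone model of that shape (not V8's ObjectVisitor machinery), assuming capacity 4 and one pointer per entry:

#include <cstdint>

// Toy object with the new layout: pointer-sized entries first, raw bytes after.
struct ToyTable {
  static constexpr int kCapacity = 4;
  void* data_table[kCapacity];     // tagged slots -- the visitor walks these
  uint8_t buckets[kCapacity / 2];  // raw bytes -- skipped
  uint8_t chains[kCapacity];       // raw bytes -- skipped
};

// Mirrors the shape of IterateBody(): walk only the data-table slots.
template <typename Visitor>
void IterateBody(ToyTable* table, Visitor&& visit) {
  for (int i = 0; i < ToyTable::kCapacity; i++) visit(&table->data_table[i]);
}

// Mirrors the shape of the new IsValidSlot(): a byte offset into the body is a
// valid pointer slot only if it falls inside the data table.
inline bool IsValidSlot(int offset) {
  constexpr int kDataTableStart = 0;
  constexpr int kBucketsStart =
      static_cast<int>(sizeof(void*) * ToyTable::kCapacity);
  return offset >= kDataTableStart && offset < kBucketsStart;
}

int main() {
  ToyTable t = {};
  int visited = 0;
  IterateBody(&t, [&visited](void**) { ++visited; });
  return visited == ToyTable::kCapacity ? 0 : 1;
}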


@@ -2252,14 +2252,16 @@ int HeapObject::SizeFromMap(Map* map) const {
                 instance_type);
   }
   if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
-    return reinterpret_cast<const SmallOrderedHashSet*>(this)->Size();
+    return SmallOrderedHashSet::SizeFor(
+        reinterpret_cast<const SmallOrderedHashSet*>(this)->Capacity());
   }
   if (instance_type == PROPERTY_ARRAY_TYPE) {
     return PropertyArray::SizeFor(
         reinterpret_cast<const PropertyArray*>(this)->synchronized_length());
   }
   if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
-    return reinterpret_cast<const SmallOrderedHashMap*>(this)->Size();
+    return SmallOrderedHashMap::SizeFor(
+        reinterpret_cast<const SmallOrderedHashMap*>(this)->Capacity());
   }
   if (instance_type == FEEDBACK_VECTOR_TYPE) {
     return FeedbackVector::SizeFor(

@@ -2609,9 +2611,10 @@ void Foreign::set_foreign_address(Address value) {
 template <class Derived>
 void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
                                                   Object* value) {
-  int entry_offset = GetDataEntryOffset(entry, relative_index);
+  Address entry_offset =
+      kHeaderSize + GetDataEntryOffset(entry, relative_index);
   RELAXED_WRITE_FIELD(this, entry_offset, value);
-  WRITE_BARRIER(GetHeap(), this, entry_offset, value);
+  WRITE_BARRIER(GetHeap(), this, static_cast<int>(entry_offset), value);
 }

 ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)


@@ -3573,18 +3573,6 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) {  // NOLINT
   os << value();
 }

-#define FIELD_ADDR(p, offset) \
-  (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
-
-#define READ_INT32_FIELD(p, offset) \
-  (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
-
-#define READ_INT64_FIELD(p, offset) \
-  (*reinterpret_cast<const int64_t*>(FIELD_ADDR(p, offset)))
-
-#define READ_BYTE_FIELD(p, offset) \
-  (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))
-
 String* JSReceiver::class_name() {
   if (IsFunction()) return GetHeap()->Function_string();
   if (IsJSArgumentsObject()) return GetHeap()->Arguments_string();
@@ -18570,6 +18558,7 @@ SmallOrderedHashTable<SmallOrderedHashMap>::Allocate(Isolate* isolate,
 template <class Derived>
 void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
                                                 int capacity) {
+  DisallowHeapAllocation no_gc;
   int num_buckets = capacity / kLoadFactor;
   int num_chains = capacity;
@@ -18577,12 +18566,12 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
   SetNumberOfElements(0);
   SetNumberOfDeletedElements(0);

-  byte* hashtable_start =
-      FIELD_ADDR(this, kHeaderSize + (kBucketsStartOffset * kOneByteSize));
-  memset(hashtable_start, kNotFound, num_buckets + num_chains);
+  Address hashtable_start = GetHashTableStartAddress(capacity);
+  memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
+         num_buckets + num_chains);

   if (isolate->heap()->InNewSpace(this)) {
-    MemsetPointer(RawField(this, GetDataTableStartOffset()),
+    MemsetPointer(RawField(this, kHeaderSize + kDataTableStartOffset),
                   isolate->heap()->the_hole_value(),
                   capacity * Derived::kEntrySize);
   } else {
@@ -18601,6 +18590,12 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
   for (int i = 0; i < num_chains; ++i) {
     DCHECK_EQ(kNotFound, GetNextEntry(i));
   }
+
+  for (int i = 0; i < capacity; ++i) {
+    for (int j = 0; j < Derived::kEntrySize; j++) {
+      DCHECK_EQ(isolate->heap()->the_hole_value(), GetDataEntry(i, j));
+    }
+  }
 #endif  // DEBUG
 }
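
Initialize() now locates the bucket/chain area through GetHashTableStartAddress() and clears it to kNotFound (0xFF) with a single memset, while the data table at the front is filled with the hole sentinel. A compact standalone model of that initialization (not V8 code), with nullptr standing in for the_hole_value() and a fixed capacity of 4:

#include <cstdint>
#include <cstring>

constexpr uint8_t kNotFound = 0xFF;
constexpr int kLoadFactor = 2;

struct ToySmallTable {
  static constexpr int kCapacity = 4;
  static constexpr int kNumBuckets = kCapacity / kLoadFactor;

  void* data_table[kCapacity];                 // entries come first now
  uint8_t hashtable[kNumBuckets + kCapacity];  // buckets, then chain links

  void Initialize() {
    // The data table is cleared to a "hole" sentinel (nullptr stands in for
    // the_hole_value() here).
    for (int i = 0; i < kCapacity; i++) data_table[i] = nullptr;
    // Buckets and chains form one contiguous byte range behind the data
    // table, so a single memset from the hash-table start covers both --
    // the same trick Initialize() uses via GetHashTableStartAddress().
    std::memset(hashtable, kNotFound, sizeof(hashtable));
  }

  uint8_t first_entry(int bucket) const { return hashtable[bucket]; }
  uint8_t next_entry(int entry) const { return hashtable[kNumBuckets + entry]; }
};

int main() {
  ToySmallTable t;
  t.Initialize();
  return t.first_entry(0) == kNotFound ? 0 : 1;
}
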
@@ -19567,10 +19562,5 @@ MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
   return MaybeHandle<Name>();
 }

-#undef FIELD_ADDR
-#undef READ_INT32_FIELD
-#undef READ_INT64_FIELD
-#undef READ_BYTE_FIELD
-
 }  // namespace internal
 }  // namespace v8


@@ -586,35 +586,45 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
 // that the DataTable entries start aligned. A bucket or chain value
 // of 255 is used to denote an unknown entry.
 //
-// Memory layout: [ Header ] [ HashTable ] [ Chains ] [ Padding ] [ DataTable ]
+// Memory layout: [ Header ] [ Padding ] [ DataTable ] [ HashTable ] [ Chains ]
 //
-// On a 64 bit machine with capacity = 4 and 2 entries,
+// The index are represented as bytes, on a 64 bit machine with
+// kEntrySize = 1, capacity = 4 and entries = 2:
 //
 // [ Header ] :
-//   [0 .. 7]   : Number of elements
-//   [8 .. 15]  : Number of deleted elements
-//   [16 .. 23] : Number of buckets
-//
-// [ HashTable ] :
-//   [24 .. 31] : First chain-link for bucket 1
-//   [32 .. 40] : First chain-link for bucket 2
-//
-// [ Chains ] :
-//   [40 .. 47] : Next chain link for entry 1
-//   [48 .. 55] : Next chain link for entry 2
-//   [56 .. 63] : Next chain link for entry 3
-//   [64 .. 71] : Next chain link for entry 4
+//   [0] : Number of elements
+//   [1] : Number of deleted elements
+//   [2] : Number of buckets
 //
 // [ Padding ] :
-//   [72 .. 127] : Padding
+//   [3 .. 7] : Padding
 //
 // [ DataTable ] :
-//   [128 .. 128 + kEntrySize - 1] : Entry 1
-//   [128 + kEntrySize .. 128 + kEntrySize + kEntrySize - 1] : Entry 2
+//   [8 .. 15]  : Entry 1
+//   [16 .. 23] : Entry 2
+//   [24 .. 31] : empty
+//   [32 .. 39] : empty
+//
+// [ HashTable ] :
+//   [40] : First chain-link for bucket 1
+//   [41] : empty
+//
+// [ Chains ] :
+//   [42] : Next chain link for bucket 1
+//   [43] : empty
+//   [44] : empty
+//   [45] : empty
 //
 template <class Derived>
 class SmallOrderedHashTable : public HeapObject {
  public:
+  // Offset points to a relative location in the table
+  typedef int Offset;
+
+  // ByteIndex points to a index in the table that needs to be
+  // converted to an Offset.
+  typedef int ByteIndex;
+
   void Initialize(Isolate* isolate, int capacity);

   static Handle<Derived> Allocate(Isolate* isolate, int capacity,
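
The new layout comment above pins down concrete byte positions for the smallest set-like case (kEntrySize = 1, capacity = 4). A quick compile-time check of those numbers (a standalone sketch, not V8 code; offsets are relative to the end of the heap-object header and kPointerSize is assumed to be 8):

constexpr int kPointerSize = 8;
constexpr int kLoadFactor = 2;
constexpr int kEntrySize = 1;  // set-like table, as in the comment above
constexpr int kCapacity = 4;

constexpr int kDataTableStartOffset = kPointerSize;  // entries begin at byte 8
constexpr int kDataTableSize = kCapacity * kEntrySize * kPointerSize;
constexpr int kBucketsStartOffset = kDataTableStartOffset + kDataTableSize;
constexpr int kChainsStartOffset =
    kBucketsStartOffset + kCapacity / kLoadFactor;

static_assert(kBucketsStartOffset == 40,
              "matches '[40] : First chain-link for bucket 1'");
static_assert(kChainsStartOffset == 42,
              "matches '[42] : Next chain link for bucket 1'");

int main() { return 0; }
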
@@ -635,86 +645,51 @@ class SmallOrderedHashTable : public HeapObject {
   static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);

-  void SetDataEntry(int entry, int relative_index, Object* value);
-
-  static int GetDataTableStartOffset(int capacity) {
-    int nof_buckets = capacity / kLoadFactor;
-    int nof_chain_entries = capacity;
-
-    int padding_index = kBucketsStartOffset + nof_buckets + nof_chain_entries;
-    int padding_offset = padding_index * kBitsPerByte;
-
-    return ((padding_offset + kPointerSize - 1) / kPointerSize) * kPointerSize;
-  }
-
-  int GetDataTableStartOffset() const {
-    return GetDataTableStartOffset(Capacity());
-  }
-
-  static int Size(int capacity) {
-    int data_table_start = GetDataTableStartOffset(capacity);
-    int data_table_size = capacity * Derived::kEntrySize * kBitsPerPointer;
-
-    return data_table_start + data_table_size;
-  }
-
-  int Size() const { return Size(Capacity()); }
-
-  void SetFirstEntry(int bucket, byte value) {
-    set(kBucketsStartOffset + bucket, value);
-  }
-
-  int GetFirstEntry(int bucket) const {
-    return get(kBucketsStartOffset + bucket);
-  }
-
-  void SetNextEntry(int entry, int next_entry) {
-    set(GetChainTableOffset() + entry, next_entry);
-  }
-
-  int GetNextEntry(int entry) const {
-    return get(GetChainTableOffset() + entry);
-  }
-
-  Object* GetDataEntry(int entry, int relative_index) {
-    int entry_offset = GetDataEntryOffset(entry, relative_index);
-    return READ_FIELD(this, entry_offset);
-  }
-
-  Object* KeyAt(int entry) const {
-    int entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
-    return READ_FIELD(this, entry_offset);
-  }
-
-  int HashToBucket(int hash) const { return hash & (NumberOfBuckets() - 1); }
-
-  int HashToFirstEntry(int hash) const {
-    int bucket = HashToBucket(hash);
-    int entry = GetFirstEntry(bucket);
-    return entry;
-  }
-
-  int GetChainTableOffset() const {
-    return kBucketsStartOffset + NumberOfBuckets();
-  }
-
-  void SetNumberOfBuckets(int num) { set(kNumberOfBucketsOffset, num); }
-
-  void SetNumberOfElements(int num) { set(kNumberOfElementsOffset, num); }
-
-  void SetNumberOfDeletedElements(int num) {
-    set(kNumberOfDeletedElementsOffset, num);
-  }
-
-  int NumberOfElements() const { return get(kNumberOfElementsOffset); }
+  // Returns total size in bytes required for a table of given
+  // capacity.
+  static int SizeFor(int capacity) {
+    DCHECK_GE(capacity, kMinCapacity);
+    DCHECK_LE(capacity, kMaxCapacity);
+
+    int data_table_size = DataTableSizeFor(capacity);
+    int hash_table_size = capacity / kLoadFactor;
+    int chain_table_size = capacity;
+    int total_size = kHeaderSize + kDataTableStartOffset + data_table_size +
+                     hash_table_size + chain_table_size;
+
+    return ((total_size + kPointerSize - 1) / kPointerSize) * kPointerSize;
+  }
+
+  // Returns the number elements that can fit into the allocated table.
+  int Capacity() const {
+    int capacity = NumberOfBuckets() * kLoadFactor;
+    DCHECK_GE(capacity, kMinCapacity);
+    DCHECK_LE(capacity, kMaxCapacity);
+
+    return capacity;
+  }
+
+  // Returns the number elements that are present in the table.
+  int NumberOfElements() const {
+    int nof_elements = getByte(0, kNumberOfElementsByteIndex);
+    DCHECK_LE(nof_elements, Capacity());
+
+    return nof_elements;
+  }

   int NumberOfDeletedElements() const {
-    return get(kNumberOfDeletedElementsOffset);
+    int nof_deleted_elements = getByte(0, kNumberOfDeletedElementsByteIndex);
+    DCHECK_LE(nof_deleted_elements, Capacity());
+
+    return nof_deleted_elements;
   }

-  int NumberOfBuckets() const { return get(kNumberOfBucketsOffset); }
-
-  DECL_VERIFIER(SmallOrderedHashTable)
+  int NumberOfBuckets() const { return getByte(0, kNumberOfBucketsByteIndex); }

-  static const byte kNotFound = 0xFF;
   static const int kMinCapacity = 4;
+  static const byte kNotFound = 0xFF;

   // We use the value 255 to indicate kNotFound for chain and bucket
   // values, which means that this value can't be used a valid
@@ -722,18 +697,107 @@ class SmallOrderedHashTable : public HeapObject {
   static const int kMaxCapacity = 254;
   STATIC_ASSERT(kMaxCapacity < kNotFound);

-  static const int kNumberOfElementsOffset = 0;
-  static const int kNumberOfDeletedElementsOffset = 1;
-  static const int kNumberOfBucketsOffset = 2;
-  static const int kBucketsStartOffset = 3;
-
   // The load factor is used to derive the number of buckets from
   // capacity during Allocation. We also depend on this to calaculate
   // the capacity from number of buckets after allocation. If we
   // decide to change kLoadFactor to something other than 2, capacity
   // should be stored as another field of this object.
   static const int kLoadFactor = 2;
-  static const int kBitsPerPointer = kPointerSize * kBitsPerByte;
+
+ protected:
+  void SetDataEntry(int entry, int relative_index, Object* value);
+
+  // TODO(gsathya): Calculate all the various possible values for this
+  // at compile time since capacity can only be 4 different values.
+  Offset GetBucketsStartOffset() const {
+    int capacity = Capacity();
+    int data_table_size = DataTableSizeFor(capacity);
+    return kDataTableStartOffset + data_table_size;
+  }
+
+  Address GetHashTableStartAddress(int capacity) const {
+    return FIELD_ADDR(
+        this, kHeaderSize + kDataTableStartOffset + DataTableSizeFor(capacity));
+  }
+
+  void SetFirstEntry(int bucket, byte value) {
+    DCHECK_LE(static_cast<unsigned>(bucket), NumberOfBuckets());
+    setByte(GetBucketsStartOffset(), bucket, value);
+  }
+
+  int GetFirstEntry(int bucket) const {
+    DCHECK_LE(static_cast<unsigned>(bucket), NumberOfBuckets());
+    return getByte(GetBucketsStartOffset(), bucket);
+  }
+
+  // TODO(gsathya): Calculate all the various possible values for this
+  // at compile time since capacity can only be 4 different values.
+  Offset GetChainTableOffset() const {
+    int nof_buckets = NumberOfBuckets();
+    int capacity = nof_buckets * kLoadFactor;
+    DCHECK_EQ(Capacity(), capacity);
+
+    int data_table_size = DataTableSizeFor(capacity);
+    int hash_table_size = nof_buckets;
+    return kDataTableStartOffset + data_table_size + hash_table_size;
+  }
+
+  void SetNextEntry(int entry, int next_entry) {
+    DCHECK_LT(static_cast<unsigned>(entry), Capacity());
+    DCHECK_GE(static_cast<unsigned>(next_entry), 0);
+    DCHECK(next_entry <= Capacity() || next_entry == kNotFound);
+    setByte(GetChainTableOffset(), entry, next_entry);
+  }
+
+  int GetNextEntry(int entry) const {
+    DCHECK_LT(entry, Capacity());
+    return getByte(GetChainTableOffset(), entry);
+  }
+
+  Object* GetDataEntry(int entry, int relative_index) {
+    DCHECK_LT(entry, Capacity());
+    DCHECK_LE(static_cast<unsigned>(relative_index), Derived::kEntrySize);
+    Offset entry_offset = GetDataEntryOffset(entry, relative_index);
+    return READ_FIELD(this, kHeaderSize + entry_offset);
+  }
+
+  Object* KeyAt(int entry) const {
+    DCHECK_LT(entry, Capacity());
+    Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
+    return READ_FIELD(this, kHeaderSize + entry_offset);
+  }
+
+  int HashToBucket(int hash) const { return hash & (NumberOfBuckets() - 1); }
+
+  int HashToFirstEntry(int hash) const {
+    int bucket = HashToBucket(hash);
+    int entry = GetFirstEntry(bucket);
+    DCHECK(entry < Capacity() || entry == kNotFound);
+    return entry;
+  }
+
+  void SetNumberOfBuckets(int num) {
+    setByte(0, kNumberOfBucketsByteIndex, num);
+  }
+
+  void SetNumberOfElements(int num) {
+    DCHECK_LE(static_cast<unsigned>(num), Capacity());
+    setByte(0, kNumberOfElementsByteIndex, num);
+  }
+
+  void SetNumberOfDeletedElements(int num) {
+    DCHECK_LE(static_cast<unsigned>(num), Capacity());
+    setByte(0, kNumberOfDeletedElementsByteIndex, num);
+  }
+
+  static const int kNumberOfElementsByteIndex = 0;
+  static const int kNumberOfDeletedElementsByteIndex = 1;
+  static const int kNumberOfBucketsByteIndex = 2;
+
+  static const Offset kDataTableStartOffset = kPointerSize;
+
+  static constexpr int DataTableSizeFor(int capacity) {
+    return capacity * Derived::kEntrySize * kPointerSize;
+  }

   // Our growth strategy involves doubling the capacity until we reach
   // kMaxCapacity, but since the kMaxCapacity is always less than 256,
@@ -742,31 +806,31 @@ class SmallOrderedHashTable : public HeapObject {
   // SmallOrderedHashTable::Grow.
   static const int kGrowthHack = 256;

- protected:
+  DECL_VERIFIER(SmallOrderedHashTable)
+
   // This is used for accessing the non |DataTable| part of the
   // structure.
-  byte get(int index) const {
-    return READ_BYTE_FIELD(this, kHeaderSize + (index * kOneByteSize));
+  byte getByte(Offset offset, ByteIndex index) const {
+    DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
+    return READ_BYTE_FIELD(this, kHeaderSize + offset + (index * kOneByteSize));
   }

-  void set(int index, byte value) {
-    WRITE_BYTE_FIELD(this, kHeaderSize + (index * kOneByteSize), value);
+  void setByte(Offset offset, ByteIndex index, byte value) {
+    DCHECK(offset < kDataTableStartOffset || offset >= GetBucketsStartOffset());
+    WRITE_BYTE_FIELD(this, kHeaderSize + offset + (index * kOneByteSize),
+                     value);
   }

-  int GetDataEntryOffset(int entry, int relative_index) const {
-    int datatable_start = GetDataTableStartOffset();
+  Offset GetDataEntryOffset(int entry, int relative_index) const {
+    DCHECK_LT(entry, Capacity());
     int offset_in_datatable = entry * Derived::kEntrySize * kPointerSize;
     int offset_in_entry = relative_index * kPointerSize;
-    return datatable_start + offset_in_datatable + offset_in_entry;
+    return kDataTableStartOffset + offset_in_datatable + offset_in_entry;
   }

-  // Returns the number elements that can fit into the allocated buffer.
-  int Capacity() const { return NumberOfBuckets() * kLoadFactor; }
-
   int UsedCapacity() const {
-    return NumberOfElements() + NumberOfDeletedElements();
+    int used = NumberOfElements() + NumberOfDeletedElements();
+    DCHECK_LE(used, Capacity());
+
+    return used;
   }
 };
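
The DCHECK added to getByte()/setByte() spells out the invariant the whole patch relies on: raw byte accesses may only touch the header fields (below kDataTableStartOffset) or the bucket/chain region (at or beyond GetBucketsStartOffset()); everything in between is a tagged data-table slot owned by the GC. A standalone sketch of that predicate (not V8 code), reusing the example numbers from the layout comment:

// Example numbers from the layout comment: kEntrySize = 1, capacity = 4,
// 64-bit pointers. Offsets are relative to the end of the header.
constexpr int kDataTableStartOffset = 8;
constexpr int kBucketsStartOffset = 8 + 4 * 1 * 8;  // 40

// Byte accessors may touch header fields or bucket/chain bytes, never the
// tagged data-table slots in between.
constexpr bool IsByteAccessible(int offset) {
  return offset < kDataTableStartOffset || offset >= kBucketsStartOffset;
}

static_assert(IsByteAccessible(0), "number-of-elements byte");
static_assert(IsByteAccessible(2), "number-of-buckets byte");
static_assert(!IsByteAccessible(8), "first data-table slot is off limits");
static_assert(!IsByteAccessible(39), "last data-table byte is off limits");
static_assert(IsByteAccessible(40), "first bucket byte");
static_assert(IsByteAccessible(42), "first chain byte");

int main() { return 0; }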