[ptr-compr] Use [Maybe]ObjectSlots as bottlenecks for accessing tagged fields

This CL introduces the Tagged_t and AtomicTagged_t typedefs, which represent
the storage type of tagged values in the V8 heap.

Bug: v8:7703
Change-Id: Ib57e85ea073eaf896b6406cf0f62adcef9a114ce
Reviewed-on: https://chromium-review.googlesource.com/c/1352294
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57878}
Author: Igor Sheludko <ishell@chromium.org>
Date: 2018-11-27 16:21:36 +01:00
Committed-by: Commit Bot
Parent: 43a532fc38
Commit: 3649dc187d

12 changed files with 127 additions and 97 deletions
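
For orientation, a minimal self-contained sketch (simplified stand-ins, not the
real V8 headers) of the design the diffs below implement: tagged fields are
stored as Tagged_t, atomic accesses funnel through an AsAtomicTagged helper,
and slot classes carry the storage type as a template parameter so it can
later differ from Address under pointer compression.

#include <atomic>
#include <cstdint>

using Address = std::uintptr_t;
using Tagged_t = Address;  // Raw storage type of a tagged value (for now).

// Simplified stand-in for base::AsAtomicPointerImpl<AtomicTagged_t>.
struct AsAtomicTagged {
  static Tagged_t Relaxed_Load(const Tagged_t* addr) {
    return reinterpret_cast<const std::atomic<Tagged_t>*>(addr)->load(
        std::memory_order_relaxed);
  }
  static void Relaxed_Store(Tagged_t* addr, Tagged_t value) {
    reinterpret_cast<std::atomic<Tagged_t>*>(addr)->store(
        value, std::memory_order_relaxed);
  }
};

// Slot classes are parameterized on the type they load and store.
template <typename Subclass, typename Data, size_t kSlotDataSize>
class SlotBase {
 public:
  using TData = Data;
  explicit SlotBase(Address ptr) : ptr_(ptr) {}
  TData* location() const { return reinterpret_cast<TData*>(ptr_); }

 private:
  Address ptr_;  // Address of the field, not its contents.
};

class ObjectSlot : public SlotBase<ObjectSlot, Tagged_t, sizeof(Tagged_t)> {
 public:
  explicit ObjectSlot(Address ptr) : SlotBase(ptr) {}
  Tagged_t Relaxed_Load() const {
    return AsAtomicTagged::Relaxed_Load(location());
  }
  void Relaxed_Store(Tagged_t value) const {
    AsAtomicTagged::Relaxed_Store(location(), value);
  }
};

int main() {
  Tagged_t field = 0;  // Stands in for one tagged field of a heap object.
  ObjectSlot slot(reinterpret_cast<Address>(&field));
  slot.Relaxed_Store(42);
  return slot.Relaxed_Load() == 42 ? 0 : 1;
}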

@@ -457,9 +457,11 @@ static void SortIndices(
   // store operations that are safe for concurrent marking.
   AtomicSlot start(indices->GetFirstElementAddress());
   std::sort(start, start + sort_size,
-            [isolate](Address elementA, Address elementB) {
-              const Object* a = reinterpret_cast<Object*>(elementA);
-              const Object* b = reinterpret_cast<Object*>(elementB);
+            [isolate](Tagged_t elementA, Tagged_t elementB) {
+              // TODO(ishell): revisit the code below
+              STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+              ObjectPtr a(elementA);
+              ObjectPtr b(elementB);
               if (a->IsSmi() || !a->IsUndefined(isolate)) {
                 if (!b->IsSmi() && b->IsUndefined(isolate)) {
                   return true;

@@ -166,7 +166,7 @@ void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) {
   DCHECK_GE(index, 0);
   DCHECK_LT(index, this->length());
   int offset = kFeedbackSlotsOffset + index * kPointerSize;
-  RELAXED_WRITE_FIELD(this, offset, value);
+  RELAXED_WRITE_WEAK_FIELD(this, offset, value);
   CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
 }

@@ -12,6 +12,7 @@
 #include <ostream>

 #include "include/v8-internal.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/build_config.h"
 #include "src/base/flags.h"
 #include "src/base/logging.h"
@@ -207,6 +208,14 @@ constexpr int kTaggedSize = kSystemPointerSize;
 constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
 STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));

+// These types define raw and atomic storage types for tagged values stored
+// on V8 heap.
+using Tagged_t = Address;
+using AtomicTagged_t = base::AtomicWord;
+using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
+STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
+STATIC_ASSERT(sizeof(AtomicTagged_t) == kTaggedSize);
+
 // TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
 constexpr int kPointerSize = kSystemPointerSize;
 constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;

@@ -14,7 +14,6 @@
 #include "src/objects.h"

-#include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/tsan.h"
 #include "src/builtins/builtins.h"
@@ -889,11 +888,9 @@ ObjectSlot HeapObject::map_slot() {
 }

 MapWord HeapObject::map_word() const {
-  return MapWord(
-      reinterpret_cast<uintptr_t>(RELAXED_READ_FIELD(this, kMapOffset)));
+  return MapWord(RELAXED_READ_FIELD(this, kMapOffset).ptr());
 }

 void HeapObject::set_map_word(MapWord map_word) {
   RELAXED_WRITE_FIELD(this, kMapOffset,
                       reinterpret_cast<Object*>(map_word.value_));
@@ -1408,7 +1405,7 @@ MaybeObject DescriptorArray::get(int index) const {

 void DescriptorArray::set(int index, MaybeObject value) {
   DCHECK(index >= 0 && index < this->length());
-  RELAXED_WRITE_FIELD(this, offset(index), value);
+  RELAXED_WRITE_WEAK_FIELD(this, offset(index), value);
   WEAK_WRITE_BARRIER(this, offset(index), value);
 }

@@ -18041,7 +18041,9 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
 template <typename Dictionary>
 struct EnumIndexComparator {
   explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
-  bool operator()(Address a, Address b) {
+  bool operator()(Tagged_t a, Tagged_t b) {
+    // TODO(ishell): revisit the code below
+    STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
     PropertyDetails da(dict->DetailsAt(Smi(a).value()));
     PropertyDetails db(dict->DetailsAt(Smi(b).value()));
     return da.dictionary_index() < db.dictionary_index();

@@ -30,7 +30,7 @@ class Object;
 // Storing heap object through this slot may require triggering write barriers
 // so this operation must be done via static store_tagged() methods.
 class EmbedderDataSlot
-    : public SlotBase<EmbedderDataSlot, kEmbedderDataSlotSize> {
+    : public SlotBase<EmbedderDataSlot, Address, kEmbedderDataSlotSize> {
  public:
   EmbedderDataSlot() : SlotBase(kNullAddress) {}
   V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);

@@ -277,7 +277,7 @@ void WeakFixedArray::Set(int index, MaybeObject value) {
   DCHECK_GE(index, 0);
   DCHECK_LT(index, length());
   int offset = OffsetOfElementAt(index);
-  RELAXED_WRITE_FIELD(this, offset, value);
+  RELAXED_WRITE_WEAK_FIELD(this, offset, value);
   WEAK_WRITE_BARRIER(this, offset, value);
 }
@@ -285,7 +285,7 @@ void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) {
   DCHECK_GE(index, 0);
   DCHECK_LT(index, length());
   int offset = OffsetOfElementAt(index);
-  RELAXED_WRITE_FIELD(this, offset, value);
+  RELAXED_WRITE_WEAK_FIELD(this, offset, value);
   CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
 }
@@ -306,7 +306,7 @@ void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
   DCHECK_GE(index, 0);
   DCHECK_LT(index, this->capacity());
   int offset = OffsetOfElementAt(index);
-  RELAXED_WRITE_FIELD(this, offset, value);
+  RELAXED_WRITE_WEAK_FIELD(this, offset, value);
   CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
 }

@@ -118,8 +118,7 @@ ObjectSlot HeapObjectPtr::map_slot() {
 }

 MapWord HeapObjectPtr::map_word() const {
-  return MapWord(
-      reinterpret_cast<Address>(RELAXED_READ_FIELD(this, kMapOffset)));
+  return MapWord(RELAXED_READ_FIELD(this, kMapOffset).ptr());
 }

 void HeapObjectPtr::set_map_word(MapWord map_word) {

@@ -296,49 +296,39 @@
 #define FIELD_ADDR(p, offset) ((p)->ptr() + offset - kHeapObjectTag)

-#define READ_FIELD(p, offset) \
-  (*reinterpret_cast<Object* const*>(FIELD_ADDR(p, offset)))
+#define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))

-#define READ_WEAK_FIELD(p, offset) \
-  MaybeObject(*reinterpret_cast<Address*>(FIELD_ADDR(p, offset)))
+#define READ_WEAK_FIELD(p, offset) (*MaybeObjectSlot(FIELD_ADDR(p, offset)))

-#define ACQUIRE_READ_FIELD(p, offset) \
-  reinterpret_cast<Object*>(base::Acquire_Load( \
-      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
+#define ACQUIRE_READ_FIELD(p, offset) \
+  ObjectSlot(FIELD_ADDR(p, offset)).Acquire_Load1()

-#define RELAXED_READ_FIELD(p, offset) \
-  reinterpret_cast<Object*>(base::Relaxed_Load( \
-      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
+#define RELAXED_READ_FIELD(p, offset) \
+  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()

 #define RELAXED_READ_WEAK_FIELD(p, offset) \
-  MaybeObject(base::Relaxed_Load( \
-      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
+  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()

 #ifdef V8_CONCURRENT_MARKING
-#define WRITE_FIELD(p, offset, value) \
-  base::Relaxed_Store( \
-      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
-      static_cast<base::AtomicWord>((value)->ptr()));
-#define WRITE_WEAK_FIELD(p, offset, value) \
-  base::Relaxed_Store( \
-      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
-      static_cast<base::AtomicWord>(value.ptr()));
+#define WRITE_FIELD(p, offset, value) \
+  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store1(value)
+#define WRITE_WEAK_FIELD(p, offset, value) \
+  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
 #else
 #define WRITE_FIELD(p, offset, value) \
-  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+  ObjectSlot(FIELD_ADDR(p, offset)).store(value)
 #define WRITE_WEAK_FIELD(p, offset, value) \
-  (*reinterpret_cast<Address*>(FIELD_ADDR(p, offset)) = value.ptr())
+  MaybeObjectSlot(FIELD_ADDR(p, offset)).store(value)
 #endif

-#define RELEASE_WRITE_FIELD(p, offset, value) \
-  base::Release_Store( \
-      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
-      static_cast<base::AtomicWord>((value)->ptr()));
+#define RELEASE_WRITE_FIELD(p, offset, value) \
+  ObjectSlot(FIELD_ADDR(p, offset)).Release_Store1(value)

-#define RELAXED_WRITE_FIELD(p, offset, value) \
-  base::Relaxed_Store( \
-      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
-      static_cast<base::AtomicWord>((value)->ptr()));
+#define RELAXED_WRITE_FIELD(p, offset, value) \
+  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store1(value)
+
+#define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
+  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)

 #define WRITE_BARRIER(object, offset, value) \
   do {                                       \
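
The V8_CONCURRENT_MARKING branch above is the heart of this hunk: even plain
WRITE_FIELD must compile to a relaxed atomic store, because marker threads
read fields while the mutator writes them. A standalone illustration of that
contract, using plain std::atomic rather than V8's helpers:

#include <atomic>
#include <cstdint>
#include <thread>

using Tagged_t = std::uintptr_t;

// Stands in for a single tagged field that a marker thread scans while the
// mutator keeps writing to it.
std::atomic<Tagged_t> field{0};

// Mutator side, analogous to ObjectSlot::Relaxed_Store(): atomic, but with no
// ordering cost beyond a plain store on mainstream hardware.
void MutatorWrite(Tagged_t value) {
  field.store(value, std::memory_order_relaxed);
}

// Marker side, analogous to ObjectSlot::Relaxed_Load(); a non-atomic read
// racing with MutatorWrite() would be undefined behavior.
Tagged_t MarkerRead() { return field.load(std::memory_order_relaxed); }

int main() {
  std::thread marker([] {
    for (int i = 0; i < 100000; ++i) MarkerRead();
  });
  for (Tagged_t i = 0; i < 100000; ++i) MutatorWrite(i);
  marker.join();
  return 0;
}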

@@ -18,41 +18,40 @@ namespace internal {
 //   FixedArray array;
 //   AtomicSlot start(array->GetFirstElementAddress());
 //   std::sort(start, start + given_length,
-//             [](Address a, Address b) {
+//             [](Tagged_t a, Tagged_t b) {
 //               // Decompress a and b if necessary.
 //               return my_comparison(a, b);
 //             });
 // Note how the comparator operates on Address values, representing the raw
 // data found at the given heap location, so you probably want to construct
 // an Object from it.
-class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> {
+class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
  public:
   // This class is a stand-in for "Address&" that uses custom atomic
   // read/write operations for the actual memory accesses.
   class Reference {
    public:
-    explicit Reference(Address* address) : address_(address) {}
+    explicit Reference(Tagged_t* address) : address_(address) {}
     Reference(const Reference& other) : address_(other.address_) {}

     Reference& operator=(const Reference& other) {
-      base::AsAtomicWord::Relaxed_Store(
-          address_, base::AsAtomicWord::Relaxed_Load(other.address_));
+      AsAtomicTagged::Relaxed_Store(
+          address_, AsAtomicTagged::Relaxed_Load(other.address_));
       return *this;
     }
-    Reference& operator=(Address value) {
-      base::AsAtomicWord::Relaxed_Store(address_, value);
+    Reference& operator=(Tagged_t value) {
+      AsAtomicTagged::Relaxed_Store(address_, value);
       return *this;
     }

     // Values of type AtomicSlot::reference must be implicitly convertible
     // to AtomicSlot::value_type.
-    operator Address() const {
-      return base::AsAtomicWord::Relaxed_Load(address_);
-    }
+    operator Tagged_t() const { return AsAtomicTagged::Relaxed_Load(address_); }

     void swap(Reference& other) {
       Address tmp = value();
-      base::AsAtomicWord::Relaxed_Store(address_, other.value());
-      base::AsAtomicWord::Relaxed_Store(other.address_, tmp);
+      AsAtomicTagged::Relaxed_Store(address_, other.value());
+      AsAtomicTagged::Relaxed_Store(other.address_, tmp);
     }

     bool operator<(const Reference& other) const {
@@ -64,15 +63,15 @@ class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> {
     }

    private:
-    Address value() const { return base::AsAtomicWord::Relaxed_Load(address_); }
+    Address value() const { return AsAtomicTagged::Relaxed_Load(address_); }

-    Address* address_;
+    Tagged_t* address_;
   };

   // The rest of this class follows C++'s "RandomAccessIterator" requirements.
   // Most of the heavy lifting is inherited from SlotBase.
   typedef int difference_type;
-  typedef Address value_type;
+  typedef Tagged_t value_type;
   typedef Reference reference;
   typedef void* pointer;  // Must be present, but should not be used.
   typedef std::random_access_iterator_tag iterator_category;
@@ -82,16 +81,16 @@ class AtomicSlot : public SlotBase<AtomicSlot, kTaggedSize> {
   explicit AtomicSlot(ObjectSlot slot) : SlotBase(slot.address()) {}

   Reference operator*() const {
-    return Reference(reinterpret_cast<Address*>(address()));
+    return Reference(reinterpret_cast<Tagged_t*>(address()));
   }
   Reference operator[](difference_type i) const {
-    return Reference(reinterpret_cast<Address*>(address() + i * kPointerSize));
+    return Reference(reinterpret_cast<Tagged_t*>(address() + i * kTaggedSize));
   }

   friend void swap(Reference lhs, Reference rhs) { lhs.swap(rhs); }

   friend difference_type operator-(AtomicSlot a, AtomicSlot b) {
-    return static_cast<int>(a.address() - b.address()) / kPointerSize;
+    return static_cast<int>(a.address() - b.address()) / kTaggedSize;
   }
 };
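
For context, the reason AtomicSlot defines a proxy Reference at all: std::sort
must be able to permute the slots while concurrent readers observe only whole,
untorn values. A condensed, self-contained sketch of the same pattern (the
real class inherits its iterator arithmetic from SlotBase; this one spells it
out, and the atomic helpers are simplified stand-ins):

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <iterator>

using Tagged_t = std::uintptr_t;

static Tagged_t Load(const Tagged_t* p) {
  return reinterpret_cast<const std::atomic<Tagged_t>*>(p)->load(
      std::memory_order_relaxed);
}
static void Store(Tagged_t* p, Tagged_t v) {
  reinterpret_cast<std::atomic<Tagged_t>*>(p)->store(
      v, std::memory_order_relaxed);
}

class AtomicSlot {
 public:
  // Proxy object standing in for "Tagged_t&"; every access is atomic.
  class Reference {
   public:
    explicit Reference(Tagged_t* a) : a_(a) {}
    Reference(const Reference& o) = default;
    Reference& operator=(const Reference& o) {
      Store(a_, Load(o.a_));
      return *this;
    }
    Reference& operator=(Tagged_t v) {
      Store(a_, v);
      return *this;
    }
    operator Tagged_t() const { return Load(a_); }
    friend void swap(Reference x, Reference y) {
      Tagged_t tmp = Load(x.a_);
      Store(x.a_, Load(y.a_));
      Store(y.a_, tmp);
    }

   private:
    Tagged_t* a_;
  };

  using difference_type = std::ptrdiff_t;
  using value_type = Tagged_t;
  using reference = Reference;
  using pointer = void*;  // Required by iterator_traits; unused.
  using iterator_category = std::random_access_iterator_tag;

  explicit AtomicSlot(Tagged_t* p) : p_(p) {}
  Reference operator*() const { return Reference(p_); }
  Reference operator[](difference_type i) const { return Reference(p_ + i); }
  AtomicSlot& operator++() { ++p_; return *this; }
  AtomicSlot& operator--() { --p_; return *this; }
  AtomicSlot operator++(int) { return AtomicSlot(p_++); }
  AtomicSlot operator--(int) { return AtomicSlot(p_--); }
  AtomicSlot& operator+=(difference_type i) { p_ += i; return *this; }
  AtomicSlot& operator-=(difference_type i) { p_ -= i; return *this; }
  AtomicSlot operator+(difference_type i) const { return AtomicSlot(p_ + i); }
  AtomicSlot operator-(difference_type i) const { return AtomicSlot(p_ - i); }
  friend difference_type operator-(AtomicSlot a, AtomicSlot b) {
    return a.p_ - b.p_;
  }
  friend bool operator==(AtomicSlot a, AtomicSlot b) { return a.p_ == b.p_; }
  friend bool operator!=(AtomicSlot a, AtomicSlot b) { return a.p_ != b.p_; }
  friend bool operator<(AtomicSlot a, AtomicSlot b) { return a.p_ < b.p_; }
  friend bool operator>(AtomicSlot a, AtomicSlot b) { return a.p_ > b.p_; }
  friend bool operator<=(AtomicSlot a, AtomicSlot b) { return a.p_ <= b.p_; }
  friend bool operator>=(AtomicSlot a, AtomicSlot b) { return a.p_ >= b.p_; }

 private:
  Tagged_t* p_;
};

int main() {
  Tagged_t elements[] = {5, 1, 4, 2, 3};
  AtomicSlot start(elements);
  std::sort(start, start + 5,
            [](Tagged_t a, Tagged_t b) { return a < b; });  // 1 2 3 4 5
  return elements[0] == 1 ? 0 : 1;
}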

@@ -18,49 +18,69 @@ namespace internal {
 ObjectSlot::ObjectSlot(ObjectPtr* object)
     : SlotBase(reinterpret_cast<Address>(&object->ptr_)) {}

-void ObjectSlot::store(Object* value) { *location() = value->ptr(); }
+void ObjectSlot::store(Object* value) const { *location() = value->ptr(); }

 ObjectPtr ObjectSlot::Acquire_Load() const {
-  return ObjectPtr(base::AsAtomicWord::Acquire_Load(location()));
+  return ObjectPtr(AsAtomicTagged::Acquire_Load(location()));
+}
+
+Object* ObjectSlot::Acquire_Load1() const {
+  return reinterpret_cast<Object*>(AsAtomicTagged::Acquire_Load(location()));
 }

 ObjectPtr ObjectSlot::Relaxed_Load() const {
-  return ObjectPtr(base::AsAtomicWord::Relaxed_Load(location()));
+  return ObjectPtr(AsAtomicTagged::Relaxed_Load(location()));
 }

 void ObjectSlot::Relaxed_Store(ObjectPtr value) const {
-  base::AsAtomicWord::Relaxed_Store(location(), value->ptr());
+  AsAtomicTagged::Relaxed_Store(location(), value->ptr());
+}
+
+void ObjectSlot::Relaxed_Store1(Object* value) const {
+  AsAtomicTagged::Relaxed_Store(location(), value->ptr());
+}
+
+void ObjectSlot::Release_Store1(Object* value) const {
+  AsAtomicTagged::Release_Store(location(), value->ptr());
 }

 void ObjectSlot::Release_Store(ObjectPtr value) const {
-  base::AsAtomicWord::Release_Store(location(), value->ptr());
+  AsAtomicTagged::Release_Store(location(), value->ptr());
 }

 ObjectPtr ObjectSlot::Release_CompareAndSwap(ObjectPtr old,
                                              ObjectPtr target) const {
-  Address result = base::AsAtomicWord::Release_CompareAndSwap(
+  Address result = AsAtomicTagged::Release_CompareAndSwap(
       location(), old->ptr(), target->ptr());
   return ObjectPtr(result);
 }

-MaybeObject MaybeObjectSlot::operator*() { return MaybeObject(*location()); }
+MaybeObject MaybeObjectSlot::operator*() const {
+  return MaybeObject(*location());
+}

-void MaybeObjectSlot::store(MaybeObject value) { *location() = value.ptr(); }
+void MaybeObjectSlot::store(MaybeObject value) const {
+  *location() = value.ptr();
+}

 MaybeObject MaybeObjectSlot::Relaxed_Load() const {
-  return MaybeObject(base::AsAtomicWord::Relaxed_Load(location()));
+  return MaybeObject(AsAtomicTagged::Relaxed_Load(location()));
+}
+
+void MaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
+  AsAtomicTagged::Relaxed_Store(location(), value->ptr());
 }

 void MaybeObjectSlot::Release_CompareAndSwap(MaybeObject old,
                                              MaybeObject target) const {
-  base::AsAtomicWord::Release_CompareAndSwap(location(), old.ptr(),
-                                             target.ptr());
+  AsAtomicTagged::Release_CompareAndSwap(location(), old.ptr(), target.ptr());
 }

-HeapObjectReference HeapObjectSlot::operator*() {
+HeapObjectReference HeapObjectSlot::operator*() const {
   return HeapObjectReference(*location());
 }
-void HeapObjectSlot::store(HeapObjectReference value) {
+void HeapObjectSlot::store(HeapObjectReference value) const {
   *location() = value.ptr();
 }

@@ -12,9 +12,12 @@ namespace internal {
 class ObjectPtr;

-template <typename Subclass, size_t SlotDataSize>
+template <typename Subclass, typename Data, size_t SlotDataSize>
 class SlotBase {
  public:
+  using TData = Data;
+
+  // TODO(ishell): This should eventually become just sizeof(TData) once
+  // pointer compression is implemented.
   static constexpr size_t kSlotDataSize = SlotDataSize;

   Subclass& operator++() {  // Prefix increment.
@@ -65,7 +68,7 @@ class SlotBase {
   Address address() const { return ptr_; }
   // For symmetry with Handle.
-  Address* location() const { return reinterpret_cast<Address*>(ptr_); }
+  TData* location() const { return reinterpret_cast<TData*>(ptr_); }

  protected:
   STATIC_ASSERT(IsAligned(kSlotDataSize, kTaggedSize));
@@ -80,11 +83,11 @@ class SlotBase {
   Address ptr_;
 };

-// An ObjectSlot instance describes a pointer-sized field ("slot") holding
+// An ObjectSlot instance describes a kTaggedSize-sized field ("slot") holding
 // a tagged pointer (smi or heap object).
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> {
+class ObjectSlot : public SlotBase<ObjectSlot, Tagged_t, kTaggedSize> {
  public:
   ObjectSlot() : SlotBase(kNullAddress) {}
   explicit ObjectSlot(Address ptr) : SlotBase(ptr) {}
@@ -94,11 +97,11 @@ class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> {
   explicit ObjectSlot(Object const* const* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit ObjectSlot(SlotBase<T, kSlotDataSize> slot)
+  explicit ObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
       : SlotBase(slot.address()) {}

   Object* operator*() const { return *reinterpret_cast<Object**>(address()); }
-  inline void store(Object* value);
+  inline void store(Object* value) const;

   inline ObjectPtr Acquire_Load() const;
   inline ObjectPtr Relaxed_Load() const;
@@ -106,51 +109,60 @@ class ObjectSlot : public SlotBase<ObjectSlot, kTaggedSize> {
   inline void Release_Store(ObjectPtr value) const;
   inline ObjectPtr Release_CompareAndSwap(ObjectPtr old,
                                           ObjectPtr target) const;
+
+  // Old-style alternative for the above, temporarily separate to allow
+  // incremental transition.
+  // TODO(3770): Get rid of the duplication when the migration is complete.
+  inline Object* Acquire_Load1() const;
+  inline void Relaxed_Store1(Object* value) const;
+  inline void Release_Store1(Object* value) const;
 };

-// A MaybeObjectSlot instance describes a pointer-sized field ("slot") holding
-// a possibly-weak tagged pointer (think: MaybeObject).
+// A MaybeObjectSlot instance describes a kTaggedSize-sized field ("slot")
+// holding a possibly-weak tagged pointer (think: MaybeObject).
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class MaybeObjectSlot : public SlotBase<MaybeObjectSlot, kTaggedSize> {
+class MaybeObjectSlot
+    : public SlotBase<MaybeObjectSlot, Tagged_t, kTaggedSize> {
  public:
   explicit MaybeObjectSlot(Address ptr) : SlotBase(ptr) {}
   explicit MaybeObjectSlot(Object** ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit MaybeObjectSlot(SlotBase<T, kSlotDataSize> slot)
+  explicit MaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
       : SlotBase(slot.address()) {}

-  inline MaybeObject operator*();
-  inline void store(MaybeObject value);
+  inline MaybeObject operator*() const;
+  inline void store(MaybeObject value) const;

   inline MaybeObject Relaxed_Load() const;
+  inline void Relaxed_Store(MaybeObject value) const;
   inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
 };

-// A HeapObjectSlot instance describes a pointer-sized field ("slot") holding
-// a weak or strong pointer to a heap object (think: HeapObjectReference).
+// A HeapObjectSlot instance describes a kTaggedSize-sized field ("slot")
+// holding a weak or strong pointer to a heap object (think:
+// HeapObjectReference).
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
 // In case it is known that that slot contains a strong heap object pointer,
 // ToHeapObject() can be used to retrieve that heap object.
-class HeapObjectSlot : public SlotBase<HeapObjectSlot, kTaggedSize> {
+class HeapObjectSlot : public SlotBase<HeapObjectSlot, Tagged_t, kTaggedSize> {
  public:
   HeapObjectSlot() : SlotBase(kNullAddress) {}
   explicit HeapObjectSlot(Address ptr) : SlotBase(ptr) {}
   template <typename T>
-  explicit HeapObjectSlot(SlotBase<T, kSlotDataSize> slot)
+  explicit HeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
       : SlotBase(slot.address()) {}

-  inline HeapObjectReference operator*();
-  inline void store(HeapObjectReference value);
+  inline HeapObjectReference operator*() const;
+  inline void store(HeapObjectReference value) const;

-  HeapObject* ToHeapObject() {
+  HeapObject* ToHeapObject() const {
     DCHECK((*location() & kHeapObjectTagMask) == kHeapObjectTag);
     return reinterpret_cast<HeapObject*>(*location());
   }

-  void StoreHeapObject(HeapObject* value) {
+  void StoreHeapObject(HeapObject* value) const {
     *reinterpret_cast<HeapObject**>(address()) = value;
   }
 };
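
A final note on the DCHECK in ToHeapObject(): the low bits of a slot's
contents encode what the slot holds. A small sketch of that tag check; the
constant values below match V8's tagging scheme of this era but are restated
from memory rather than taken from this diff:

#include <cassert>
#include <cstdint>

using Address = std::uintptr_t;

constexpr Address kSmiTag = 0;             // ...0: small integer (Smi)
constexpr Address kSmiTagMask = 1;
constexpr Address kHeapObjectTag = 1;      // ..01: strong heap object pointer
constexpr Address kWeakHeapObjectTag = 3;  // ..11: weak heap object pointer
constexpr Address kHeapObjectTagMask = 3;

bool IsSmi(Address raw) { return (raw & kSmiTagMask) == kSmiTag; }
bool IsStrongHeapObject(Address raw) {
  return (raw & kHeapObjectTagMask) == kHeapObjectTag;
}
bool IsWeakHeapObject(Address raw) {
  return (raw & kHeapObjectTagMask) == kWeakHeapObjectTag;
}

int main() {
  assert(IsStrongHeapObject(0x1000 + 1));  // Object at 0x1000, strong ref.
  assert(IsWeakHeapObject(0x1000 + 3));    // Same object, weak ref.
  assert(IsSmi(Address{42} << 1));         // Smi payload lives above the tag.
  return 0;
}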