[heap][deserializer] Better HeapObject alignment checks

Unaligned allocations are not fully supported in V8.

- Set USE_ALLOCATION_ALIGNMENT_BOOL to false for documentation purposes
- Verify HeapObject address alignment requirements with --verify-heap
- Move the address alignment check to right after allocation in the
  deserializer
- Use object_size in the CheckAlignment helper so we get a chance to
  figure out which allocation path was taken (the fill computation
  behind these checks is sketched below)

Bug: chromium:1330861, v8:8875
Change-Id: Iffd02d869923ccec133618250dfefb0480b02741
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3717995
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81358}
Commit 14af9c22db (parent 930f3ffb73), authored by Camillo Bruni on
2022-06-24 16:53:23 +02:00, committed by V8 LUCI CQ.
6 changed files with 14 additions and 23 deletions.
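
For context, here is a minimal standalone sketch of the fill computation that
Heap::GetFillToAlign performs and that the new CheckRequiredAlignment helper
builds on. This is an illustration, not the V8 sources; the constants assume a
heap with 4-byte tagged slots (32-bit or pointer compression).

// sketch_fill_to_align.cc - illustration only, not the V8 implementation.
#include <cassert>
#include <cstdint>

constexpr int kTaggedSize = 4;  // assumption: 4-byte tagged slots
constexpr int kDoubleSize = 8;
constexpr uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

// How many filler bytes must precede `address` so that the object placed
// after them satisfies `alignment`?
int GetFillToAlign(uintptr_t address, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
    return kTaggedSize;
  if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
    return kDoubleSize - kTaggedSize;
  return 0;  // kTaggedAligned: any tagged-aligned address already fits
}

int main() {
  assert(GetFillToAlign(0x1000, kDoubleAligned) == 0);    // already 8-aligned
  assert(GetFillToAlign(0x1004, kDoubleAligned) == 4);    // needs one filler slot
  assert(GetFillToAlign(0x1000, kDoubleUnaligned) == 4);  // must NOT be 8-aligned
  assert(GetFillToAlign(0x1004, kDoubleUnaligned) == 0);
  // The new check is then simply: the alignment holds iff the fill is zero.
  return 0;
}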

src/common/globals.h

@@ -932,17 +932,11 @@ enum AllocationAlignment {
   kDoubleUnaligned
 };
 #ifdef V8_HOST_ARCH_32_BIT
 #define USE_ALLOCATION_ALIGNMENT_BOOL true
 #else
 #ifdef V8_COMPRESS_POINTERS
 // TODO(ishell, v8:8875): Consider using aligned allocations once the
 // allocation alignment inconsistency is fixed. For now we keep using
-// unaligned access since both x64 and arm64 architectures (where pointer
-// compression is supported) allow unaligned access to doubles and full words.
+// tagged aligned (not double aligned) access since all our supported platforms
+// allow tagged-aligned access to doubles and full words.
 #endif  // V8_COMPRESS_POINTERS
 #define USE_ALLOCATION_ALIGNMENT_BOOL false
 #endif  // V8_HOST_ARCH_32_BIT
 enum class AccessMode { ATOMIC, NON_ATOMIC };
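
The rewritten comment above asserts that V8's supported 64-bit platforms
tolerate tagged-aligned (4-byte, under pointer compression) access to doubles.
A standalone sketch of why that is safe in portable C++ terms; nothing below
is V8 code:

// Illustration only: reading a double from a 4-byte-aligned slot via memcpy
// is well-defined C++ on any platform, and x64/arm64 compile it to a plain,
// efficient unaligned load.
#include <cstdio>
#include <cstring>

int main() {
  alignas(8) unsigned char heap[16] = {};
  double boxed = 1.5;
  std::memcpy(heap + 4, &boxed, sizeof boxed);  // store at a 4-aligned offset
  double read_back;
  std::memcpy(&read_back, heap + 4, sizeof read_back);
  std::printf("%f\n", read_back);  // prints 1.500000
  return 0;
}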

src/diagnostics/objects-debug.cc

@@ -179,6 +179,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
   VerifyPointer(isolate, map(cage_base));
   CHECK(map(cage_base).IsMap(cage_base));
+  CHECK(CheckRequiredAlignment(isolate));
+
   switch (map(cage_base).instance_type()) {
 #define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
     STRING_TYPE_LIST(STRING_TYPE_CASE)

src/objects/heap-object.h

@@ -199,6 +199,7 @@ class HeapObject : public Object {
 #endif
   static inline AllocationAlignment RequiredAlignment(Map map);
+  bool inline CheckRequiredAlignment(PtrComprCageBase cage_base) const;
 
   // Whether the object needs rehashing. That is the case if the object's
   // content depends on FLAG_hash_seed. When the object is deserialized into

src/objects/objects-inl.h

@@ -1006,6 +1006,12 @@ AllocationAlignment HeapObject::RequiredAlignment(Map map) {
   return kTaggedAligned;
 }
 
+bool HeapObject::CheckRequiredAlignment(PtrComprCageBase cage_base) const {
+  AllocationAlignment alignment = HeapObject::RequiredAlignment(map(cage_base));
+  CHECK_EQ(0, Heap::GetFillToAlign(address(), alignment));
+  return true;
+}
+
 Address HeapObject::GetFieldAddress(int field_offset) const {
   return field_address(field_offset);
 }
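
CheckRequiredAlignment fails hard via CHECK_EQ but still returns a bool; that
lets call sites wrap it in DCHECK(...) so the whole verification disappears
from release builds. A minimal sketch of that idiom with stand-in macros (my
assumptions, not V8's base/logging.h):

#include <cstdio>
#include <cstdlib>

// Stand-ins for V8's CHECK_EQ / DCHECK.
#define CHECK_EQ(a, b)                                                 \
  do {                                                                 \
    if ((a) != (b)) { std::fprintf(stderr, "check failed\n"); std::abort(); } \
  } while (0)
#ifdef DEBUG
#define DCHECK(cond) CHECK_EQ(true, (cond))
#else
#define DCHECK(cond) ((void)0)  // release builds: argument not even evaluated
#endif

bool CheckAlignment(unsigned long address, int alignment) {
  CHECK_EQ(0ul, address % alignment);  // fails hard on misalignment
  return true;  // bool return so the call can sit inside DCHECK(...)
}

int main() {
  DCHECK(CheckAlignment(0x1000, 8));  // compiled out unless DEBUG is defined
  return 0;
}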

src/snapshot/deserializer.cc

@@ -425,10 +425,6 @@ void Deserializer<IsolateT>::PostProcessNewJSReceiver(
                            EmptyBackingStoreBuffer());
     }
   }
-
-  // Check alignment.
-  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
-                                    HeapObject::RequiredAlignment(map)));
 }
 
 template <typename IsolateT>
@@ -439,10 +435,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
   Map raw_map = *map;
   DCHECK_EQ(raw_map, obj->map(isolate_));
   InstanceType instance_type = raw_map.instance_type();
-
-  // Check alignment.
-  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
-                                    HeapObject::RequiredAlignment(raw_map)));
   HeapObject raw_obj = *obj;
   DCHECK_IMPLIES(deserializing_user_code(), should_rehash());
   if (should_rehash()) {
@@ -640,6 +632,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadObject(SnapshotSpace space) {
   raw_obj.set_map_after_allocation(*map);
   MemsetTagged(raw_obj.RawField(kTaggedSize),
                Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
+  DCHECK(raw_obj.CheckRequiredAlignment(isolate()));
 
   // Make sure BytecodeArrays have a valid age, so that the marker doesn't
   // break when making them older.
@@ -701,6 +694,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadMetaMap() {
   raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
   MemsetTagged(raw_obj.RawField(kTaggedSize),
                Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
+  DCHECK(raw_obj.CheckRequiredAlignment(isolate()));
 
   Handle<HeapObject> obj = handle(raw_obj, isolate());
   back_refs_.push_back(obj);
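
The four hunks above are the "move the check right after allocation" part of
the change: the DCHECK now runs as soon as the map is written, before any
post-processing touches the object. A toy model of that ordering, with all
names hypothetical:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kTaggedSize = 8;

struct ToyObject {
  uintptr_t address;
  bool CheckRequiredAlignment() const {
    assert(address % kTaggedSize == 0);  // stand-in for the CHECK_EQ above
    return true;
  }
};

ToyObject Allocate(uintptr_t top) { return ToyObject{top}; }

int main() {
  ToyObject raw = Allocate(0x10000);
  // New placement: verify immediately after allocation and map write, so a
  // misaligned allocation is reported at the allocation site...
  assert(raw.CheckRequiredAlignment());
  // ...instead of later, after post-processing has already used the object.
  return 0;
}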

test/cctest/heap/test-spaces.cc

@@ -916,13 +916,7 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
   int object_size =
       static_cast<int>(MemoryAllocator::GetCommitPageSize() - kApiTaggedSize);
-// TODO(v8:8875): Pointer compression does not enable aligned memory allocation
-// yet.
-#ifdef V8_COMPRESS_POINTERS
-  int alignment = kInt32Size;
-#else
-  int alignment = kDoubleSize;
-#endif
+  int alignment = USE_ALLOCATION_ALIGNMENT_BOOL ? kDoubleSize : kTaggedSize;
 
   HeapObject object =
       faked_space->AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
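
The test change keys the expected padding off USE_ALLOCATION_ALIGNMENT_BOOL
instead of hard-coding per-configuration constants. A small sketch of the
arithmetic the test presumably relies on (assumed from context, not taken
from the test itself): the footprint of an allocation is the object size
rounded up to the effective alignment.

#include <cassert>

constexpr int kTaggedSize = 4;  // assumption: pointer compression
constexpr int kDoubleSize = 8;
constexpr bool kUseAllocationAlignment = false;  // mirrors the macro above

int RoundUp(int value, int granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

int main() {
  int object_size = 4092;  // arbitrary example size
  int alignment = kUseAllocationAlignment ? kDoubleSize : kTaggedSize;
  // With allocation alignment disabled, a kDoubleAligned request still only
  // consumes the tagged-size-rounded footprint...
  assert(RoundUp(object_size, alignment) == 4092);
  // ...whereas honoring double alignment would round up to the next 8 bytes.
  assert(RoundUp(object_size, kDoubleSize) == 4096);
  return 0;
}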