[heap][deserializer] Better HeapObject alignment checks
Unaligned allocations are not fully supported in V8.

- Set USE_ALLOCATION_ALIGNMENT_BOOL to false for documentation
- Verify HeapObject address alignment requirements with --verify-heap
- Move the address alignment check to right after allocation in the
  deserializer
- Use object_size in the CheckAlignment helper to get a chance to figure
  out which allocation path we took

Bug: chromium:1330861, v8:8875
Change-Id: Iffd02d869923ccec133618250dfefb0480b02741
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3717995
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81358}
commit 14af9c22db
parent 930f3ffb73
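For context on the checks this change adds: Heap::GetFillToAlign reports how many filler bytes an allocation at a given address needs before an object with a given AllocationAlignment can start, and the new checks assert that this fill is zero. Below is a minimal, self-contained sketch of that computation, not V8's actual code; the kTaggedSize/kDoubleSize values assume a 64-bit build with pointer compression.

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kTaggedSize = 4;  // compressed tagged pointer size
constexpr uintptr_t kDoubleSize = 8;  // sizeof(double)

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

// Filler bytes needed before an object placed at `address` satisfies
// `alignment`; mirrors the shape of Heap::GetFillToAlign.
int GetFillToAlign(uintptr_t address, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (address & (kDoubleSize - 1)) != 0)
    return static_cast<int>(kTaggedSize);
  if (alignment == kDoubleUnaligned && (address & (kDoubleSize - 1)) == 0)
    return static_cast<int>(kDoubleSize - kTaggedSize);
  return 0;  // Allocation addresses are always at least tagged-aligned.
}

int main() {
  printf("%d\n", GetFillToAlign(0x10004, kDoubleAligned));    // 4: needs fill
  printf("%d\n", GetFillToAlign(0x10008, kDoubleAligned));    // 0: aligned
  printf("%d\n", GetFillToAlign(0x10008, kDoubleUnaligned));  // 4: needs fill
  return 0;
}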
@@ -932,17 +932,11 @@ enum AllocationAlignment {
   kDoubleUnaligned
 };
 
-#ifdef V8_HOST_ARCH_32_BIT
-#define USE_ALLOCATION_ALIGNMENT_BOOL true
-#else
-#ifdef V8_COMPRESS_POINTERS
 // TODO(ishell, v8:8875): Consider using aligned allocations once the
 // allocation alignment inconsistency is fixed. For now we keep using
-// unaligned access since both x64 and arm64 architectures (where pointer
-// compression is supported) allow unaligned access to doubles and full words.
-#endif  // V8_COMPRESS_POINTERS
+// tagged aligned (not double aligned) access since all our supported platforms
+// allow tagged-aligned access to doubles and full words.
 #define USE_ALLOCATION_ALIGNMENT_BOOL false
-#endif  // V8_HOST_ARCH_32_BIT
 
 enum class AccessMode { ATOMIC, NON_ATOMIC };
 
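The hunk above hard-codes USE_ALLOCATION_ALIGNMENT_BOOL to false for documentation purposes: V8 currently only guarantees tagged alignment. A hedged illustration of what such a flag implies at allocation sites follows; EffectiveAlignment is an illustrative helper, not V8's API.

#include <cassert>

// Stand-in for USE_ALLOCATION_ALIGNMENT_BOOL; false mirrors the hunk above.
constexpr bool kUseAllocationAlignment = false;

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

// When only tagged alignment is supported, every requested alignment
// collapses to kTaggedAligned.
AllocationAlignment EffectiveAlignment(AllocationAlignment requested) {
  return kUseAllocationAlignment ? requested : kTaggedAligned;
}

int main() {
  // A kDoubleAligned request is only honored as tagged alignment, which is
  // the inconsistency that the v8:8875 TODO above refers to.
  assert(EffectiveAlignment(kDoubleAligned) == kTaggedAligned);
  return 0;
}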
@@ -179,6 +179,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
   VerifyPointer(isolate, map(cage_base));
   CHECK(map(cage_base).IsMap(cage_base));
 
+  CHECK(CheckRequiredAlignment(isolate));
+
   switch (map(cage_base).instance_type()) {
 #define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
     STRING_TYPE_LIST(STRING_TYPE_CASE)
@@ -199,6 +199,7 @@ class HeapObject : public Object {
 #endif
 
   static inline AllocationAlignment RequiredAlignment(Map map);
+  bool inline CheckRequiredAlignment(PtrComprCageBase cage_base) const;
 
   // Whether the object needs rehashing. That is the case if the object's
   // content depends on FLAG_hash_seed. When the object is deserialized into
@@ -1006,6 +1006,12 @@ AllocationAlignment HeapObject::RequiredAlignment(Map map) {
   return kTaggedAligned;
 }
 
+bool HeapObject::CheckRequiredAlignment(PtrComprCageBase cage_base) const {
+  AllocationAlignment alignment = HeapObject::RequiredAlignment(map(cage_base));
+  CHECK_EQ(0, Heap::GetFillToAlign(address(), alignment));
+  return true;
+}
+
 Address HeapObject::GetFieldAddress(int field_offset) const {
   return field_address(field_offset);
 }
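CheckRequiredAlignment above always returns true; the hard failure comes from the CHECK_EQ inside it. That shape lets callers wrap one helper in DCHECK (debug-only, as the deserializer does below) or in CHECK (always on, as the --verify-heap path above does). A small sketch of the idiom with stand-in macros, not V8's real CHECK/DCHECK:

#include <cstdint>
#include <cstdlib>

// Stand-ins: CHECK always evaluates; DCHECK only in debug builds (here
// approximated by NDEBUG).
#define CHECK(condition) \
  do {                   \
    if (!(condition)) std::abort(); \
  } while (false)
#ifndef NDEBUG
#define DCHECK(condition) CHECK(condition)
#else
#define DCHECK(condition) ((void)0)
#endif

// Returns true so callers can compose it with CHECK/DCHECK; the real
// failure signal is the CHECK inside, next to the relevant data.
bool CheckAligned(uintptr_t address, uintptr_t alignment) {
  CHECK(address % alignment == 0);
  return true;
}

int main() {
  DCHECK(CheckAligned(0x1000, 8));  // evaluated only in debug builds
  CHECK(CheckAligned(0x1008, 8));   // evaluated in all builds
  return 0;
}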
@@ -425,10 +425,6 @@ void Deserializer<IsolateT>::PostProcessNewJSReceiver(
                           EmptyBackingStoreBuffer());
     }
   }
-
-  // Check alignment.
-  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
-                                    HeapObject::RequiredAlignment(map)));
 }
 
 template <typename IsolateT>
@@ -439,10 +435,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
   Map raw_map = *map;
   DCHECK_EQ(raw_map, obj->map(isolate_));
   InstanceType instance_type = raw_map.instance_type();
-
-  // Check alignment.
-  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
-                                    HeapObject::RequiredAlignment(raw_map)));
   HeapObject raw_obj = *obj;
   DCHECK_IMPLIES(deserializing_user_code(), should_rehash());
   if (should_rehash()) {
@@ -640,6 +632,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadObject(SnapshotSpace space) {
   raw_obj.set_map_after_allocation(*map);
   MemsetTagged(raw_obj.RawField(kTaggedSize),
                Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
+  DCHECK(raw_obj.CheckRequiredAlignment(isolate()));
 
   // Make sure BytecodeArrays have a valid age, so that the marker doesn't
   // break when making them older.
@@ -701,6 +694,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadMetaMap() {
   raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
   MemsetTagged(raw_obj.RawField(kTaggedSize),
                Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
+  DCHECK(raw_obj.CheckRequiredAlignment(isolate()));
 
   Handle<HeapObject> obj = handle(raw_obj, isolate());
   back_refs_.push_back(obj);
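The deserializer hunks above replace the late alignment DCHECKs in PostProcessNewObject/PostProcessNewJSReceiver with a check right after each freshly allocated object receives its map, so a failure points at the allocation path rather than at a later post-processing step. A toy sketch of that ordering; AllocateRaw here is a malloc stand-in, not V8's allocator:

#include <cassert>
#include <cstdint>
#include <cstdlib>

enum AllocationAlignment { kTaggedAligned, kDoubleAligned };

// Stand-in for HeapObject::RequiredAlignment(map).
AllocationAlignment RequiredAlignment(bool contains_doubles) {
  return contains_doubles ? kDoubleAligned : kTaggedAligned;
}

int main() {
  void* raw = std::malloc(64);  // stand-in for the deserializer's allocation
  uintptr_t address = reinterpret_cast<uintptr_t>(raw);
  // ... set_map_after_allocation(...) would run here ...
  // Asserting immediately after allocation pinpoints the allocation path
  // that produced a misaligned address, instead of failing much later.
  if (RequiredAlignment(/*contains_doubles=*/true) == kDoubleAligned) {
    assert(address % 8 == 0);
  }
  std::free(raw);
  return 0;
}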
@@ -916,13 +916,7 @@ TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
   int object_size =
       static_cast<int>(MemoryAllocator::GetCommitPageSize() - kApiTaggedSize);
 
-  // TODO(v8:8875): Pointer compression does not enable aligned memory allocation
-  // yet.
-#ifdef V8_COMPRESS_POINTERS
-  int alignment = kInt32Size;
-#else
-  int alignment = kDoubleSize;
-#endif
+  int alignment = USE_ALLOCATION_ALIGNMENT_BOOL ? kDoubleSize : kTaggedSize;
 
   HeapObject object =
       faked_space->AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();