Allow the map word to be used for other state in the GC header.

This CL adds support for packing and unpacking map words.

Currently V8 cannot store extra metadata in object headers, because V8
objects do not have a proper header: they carry only a map pointer at the
start of the object. As a result, per-object metadata such as marking
data has to live in a side table.

This CL enables V8 to use the unused upper bits of a 64-bit map word as
per-object metadata storage. Map pointer stores gain an extra step that
encodes the metadata into the pointer (we call this "map packing"), and
map pointer loads strip the metadata bits again (we call this "map
unpacking").

Since the packed map word is no longer a valid pointer, we also change
its tag so that it looks like a Smi. This lets GC and write-barrier code
correctly skip packed map words instead of blindly dereferencing an
invalid pointer.
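
The following is a minimal standalone C++ sketch of the packing scheme
described above, not the actual V8 implementation; the constants mirror
the kMapWordXorMask and kMapWordMetadataMask values this CL adds to
include/v8-internal.h.

  #include <cstdint>

  // Mirrors the constants added to include/v8-internal.h by this CL.
  constexpr uint64_t kMapWordMetadataMask = 0xffffULL << 48;  // upper 16 bits
  constexpr uint64_t kMapWordXorMask = 0b11;  // turns heap-object tag 0b01 into 0b10

  // Packing: XOR the map pointer so that its low bits become the Smi-like
  // signature 0b10; the GC may later store metadata in the upper 16 bits.
  uint64_t PackMapWord(uint64_t map_pointer) {
    return map_pointer ^ kMapWordXorMask;
  }

  // Unpacking: drop any metadata in the upper bits, then undo the XOR to
  // recover the original map pointer.
  uint64_t UnpackMapWord(uint64_t map_word) {
    return (map_word & ~kMapWordMetadataMask) ^ kMapWordXorMask;
  }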

A GN build flag `v8_enable_map_packing` is provided to turn this
map-packing feature on and off. It is disabled by default.

* Only works on the x64 platform, with `v8_enable_pointer_compression`
  set to `false` (see the example `args.gn` below)
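
For illustration, a minimal `args.gn` that satisfies the build asserts
added in this CL (x64 target, pointer compression disabled) could look
like:

  # Illustrative args.gn for an x64 build with map packing enabled.
  v8_enable_map_packing = true
  v8_enable_pointer_compression = false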

Bug: v8:11624
Change-Id: Ia2bdf79553945e5fc0b0874c87803d2cc733e073
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2247561
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73915}
Author: Wenyu Zhao
Authored: 2021-04-06 22:01:44 +10:00, committed by Commit Bot
Parent: 71d0a9dde5
Commit: 5e0b94c4dc
80 changed files with 773 additions and 148 deletions


@ -228,6 +228,7 @@ Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wei Wu <lazyparser@gmail.com>
Wenlu Wang <kingwenlu@gmail.com>
Wenyu Zhao <wenyu.zhao@anu.edu.au>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Wouter Vermeiren <wouter.vermeiren@essensium.com>
Xiaofang Zou <zouxiaofang@iscas.ac.cn>


@ -300,6 +300,9 @@ declare_args() {
# meaning that they are not switched to fast mode.
# Sets -DV8_DICT_PROPERTY_CONST_TRACKING
v8_dict_property_const_tracking = false
# Enable map packing & unpacking (sets -DV8_MAP_PACKING).
v8_enable_map_packing = false
}
# Derived defaults.
@ -330,7 +333,7 @@ if (v8_enable_snapshot_native_code_counters == "") {
}
if (v8_enable_pointer_compression == "") {
v8_enable_pointer_compression =
v8_current_cpu == "arm64" || v8_current_cpu == "x64"
(v8_current_cpu == "arm64" || v8_current_cpu == "x64")
}
if (v8_enable_pointer_compression_shared_cage == "") {
v8_enable_pointer_compression_shared_cage = false
@ -404,6 +407,12 @@ if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
"Sharing read-only heap with pointer compression is only supported on Linux or Android")
}
assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
"Map packing does not support pointer compression")
assert(!v8_enable_map_packing || v8_current_cpu == "x64",
"Map packing is only supported on x64")
assert(
!v8_enable_pointer_compression_shared_cage || !v8_enable_shared_ro_heap,
"Sharing read-only heap is not yet supported when sharing a pointer compression cage")
@ -566,6 +575,7 @@ external_v8_defines = [
"V8_IMMINENT_DEPRECATION_WARNINGS",
"V8_NO_ARGUMENTS_ADAPTOR",
"V8_USE_PERFETTO",
"V8_MAP_PACKING",
]
enabled_external_v8_defines = []
@ -599,6 +609,9 @@ if (v8_imminent_deprecation_warnings) {
if (v8_use_perfetto) {
enabled_external_v8_defines += [ "V8_USE_PERFETTO" ]
}
if (v8_enable_map_packing) {
enabled_external_v8_defines += [ "V8_MAP_PACKING" ]
}
disabled_external_v8_defines = external_v8_defines - enabled_external_v8_defines


@ -40,6 +40,13 @@ const int kWeakHeapObjectTag = 3;
const int kHeapObjectTagSize = 2;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
// Tag information for forwarding pointers stored in object headers.
// 0b00 in the lowest 2 bits of the header indicates that the map word is a
// forwarding pointer.
const int kForwardingTag = 0;
const int kForwardingTagSize = 2;
const intptr_t kForwardingTagMask = (1 << kForwardingTagSize) - 1;
// Tag information for Smi.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
@ -177,6 +184,14 @@ V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
* depend on functions and constants defined here.
*/
class Internals {
#ifdef V8_MAP_PACKING
V8_INLINE static constexpr internal::Address UnpackMapWord(
internal::Address mapword) {
// TODO(wenyuzhao): Clear header metadata.
return mapword ^ kMapWordXorMask;
}
#endif
public:
// These values match non-compiler-dependent values defined within
// the implementation of v8.
@ -253,6 +268,17 @@ class Internals {
// incremental GC once the external memory reaches this limit.
static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
#ifdef V8_MAP_PACKING
static const uintptr_t kMapWordMetadataMask = 0xffffULL << 48;
// The lowest two bits of map words are always `0b10`.
static const uintptr_t kMapWordSignature = 0b10;
// XORing a (non-compressed) map with this mask ensures that the two
// low-order bits are 0b10. The 0 at the end makes this look like a Smi,
// although real Smis have all lower 32 bits unset. We only rely on these
// values passing as Smis in very few places.
static const int kMapWordXorMask = 0b11;
#endif
V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
@ -279,6 +305,9 @@ class Internals {
V8_INLINE static int GetInstanceType(const internal::Address obj) {
typedef internal::Address A;
A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
#ifdef V8_MAP_PACKING
map = UnpackMapWord(map);
#endif
return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
}


@ -2079,7 +2079,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label ok, fail;
__ AssertNotSmi(rbx);
Register map = r9;
__ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset));
__ LoadMap(map, rbx);
__ CmpInstanceType(map, FIXED_ARRAY_TYPE);
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
@ -2166,7 +2166,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
__ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadMap(rbx, rdx);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
@ -2592,7 +2592,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(rdi, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
__ LoadMap(rcx, rdi);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
@ -3866,9 +3866,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
Register map = rcx;
__ JumpIfSmi(return_value, &ok, Label::kNear);
__ LoadTaggedPointerField(map,
FieldOperand(return_value, HeapObject::kMapOffset));
__ LoadMap(map, return_value);
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);


@ -1619,6 +1619,8 @@ TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<HeapObject> object, int offset) {
// Please use LoadMap(object) instead.
DCHECK_NE(offset, HeapObject::kMapOffset);
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@ -1631,6 +1633,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
TNode<HeapObject> object, int offset) {
// Please use LoadMap(object) instead.
DCHECK_NE(offset, HeapObject::kMapOffset);
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@ -1656,7 +1660,15 @@ TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) {
}
TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
return LoadObjectField<Map>(object, HeapObject::kMapOffset);
TNode<Map> map = LoadObjectField<Map>(object, HeapObject::kMapOffset);
#ifdef V8_MAP_PACKING
// Check that the loaded map is unpacked, i.e. the lowest two bits != 0b10
CSA_ASSERT(this,
WordNotEqual(WordAnd(BitcastTaggedToWord(map),
IntPtrConstant(Internals::kMapWordXorMask)),
IntPtrConstant(Internals::kMapWordSignature)));
#endif
return map;
}
TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) {
@ -2033,6 +2045,13 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Goto(if_strong);
}
void CodeStubAssembler::AssertHasValidMap(TNode<HeapObject> object) {
#ifdef V8_MAP_PACKING
// Test if the map is an unpacked and valid map
CSA_ASSERT(this, IsMap(LoadMap(object)));
#endif
}
TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) {
return Word32Equal(Word32And(TruncateIntPtrToInt32(
BitcastTaggedToWordForTagAndSmiBits(value)),
@ -2943,12 +2962,14 @@ void CodeStubAssembler::StoreObjectField(TNode<HeapObject> object,
void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
TNode<HeapObject> object, int offset, TNode<Object> value) {
DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation::kTagged,
object, offset, value);
}
void CodeStubAssembler::StoreMap(TNode<HeapObject> object, TNode<Map> map) {
OptimizedStoreMap(object, map);
AssertHasValidMap(object);
}
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
@ -2958,16 +2979,19 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
TNode<Map> map) {
OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation::kTaggedPointer,
object, HeapObject::kMapOffset, map);
OptimizedStoreMap(object, map);
AssertHasValidMap(object);
}
void CodeStubAssembler::StoreObjectFieldRoot(TNode<HeapObject> object,
int offset, RootIndex root_index) {
if (RootsTable::IsImmortalImmovable(root_index)) {
StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
TNode<Object> root = LoadRoot(root_index);
if (offset == HeapObject::kMapOffset) {
StoreMap(object, CAST(root));
} else if (RootsTable::IsImmortalImmovable(root_index)) {
StoreObjectFieldNoWriteBarrier(object, offset, root);
} else {
StoreObjectField(object, offset, LoadRoot(root_index));
StoreObjectField(object, offset, root);
}
}
@ -11102,9 +11126,12 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
// Memento map check.
BIND(&map_check);
{
TNode<Object> memento_map = LoadObjectField(object, kMementoMapOffset);
Branch(TaggedEqual(memento_map, AllocationMementoMapConstant()),
memento_found, &no_memento_found);
TNode<AnyTaggedT> maybe_mapword =
LoadObjectField(object, kMementoMapOffset);
TNode<AnyTaggedT> memento_mapword =
LoadRootMapWord(RootIndex::kAllocationMementoMap);
Branch(TaggedEqual(maybe_mapword, memento_mapword), memento_found,
&no_memento_found);
}
BIND(&no_memento_found);
Comment("] TrapAllocationMemento");
@ -11323,7 +11350,12 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
TNode<Object> root_value = LoadRoot(root_index);
TNode<AnyTaggedT> root_value;
if (root_index == RootIndex::kOnePointerFillerMap) {
root_value = LoadRootMapWord(root_index);
} else {
root_value = LoadRoot(root_index);
}
BuildFastLoop<IntPtrT>(
end_offset, start_offset,
[=](TNode<IntPtrT> current) {


@ -1088,7 +1088,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load a field from an object on the heap.
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value,
std::is_convertible<TNode<T>, TNode<Object>>::value &&
std::is_base_of<T, Map>::value,
int>::type = 0>
TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
const MachineType machine_type = offset == HeapObject::kMapOffset
? MachineType::MapInHeader()
: MachineTypeOf<T>::value;
return CAST(LoadFromObject(machine_type, object,
IntPtrConstant(offset - kHeapObjectTag)));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value &&
!std::is_base_of<T, Map>::value,
int>::type = 0>
TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
return CAST(LoadFromObject(MachineTypeOf<T>::value, object,
@ -1163,6 +1175,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
if (IsMapOffsetConstant(reference.offset)) {
TNode<Map> map = LoadMap(CAST(reference.object));
DCHECK((std::is_base_of<T, Map>::value));
return ReinterpretCast<T>(map);
}
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
@ -1175,6 +1193,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_same<T, MaybeObject>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
DCHECK(!IsMapOffsetConstant(reference.offset));
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
return UncheckedCast<T>(
@ -1185,6 +1204,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_same<T, MaybeObject>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
if (IsMapOffsetConstant(reference.offset)) {
DCHECK((std::is_base_of<T, Map>::value));
return StoreMap(CAST(reference.object), ReinterpretCast<Map>(value));
}
MachineRepresentation rep = MachineRepresentationOf<T>::value;
StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull;
if (std::is_same<T, Smi>::value) {
@ -1201,6 +1224,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
DCHECK(!IsMapOffsetConstant(reference.offset));
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
StoreToObject(MachineRepresentationOf<T>::value, reference.object, offset,
@ -3884,6 +3908,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return CodeAssembler::LoadRoot(root_index);
}
TNode<AnyTaggedT> LoadRootMapWord(RootIndex root_index) {
return CodeAssembler::LoadRootMapWord(root_index);
}
template <typename TIndex>
void StoreFixedArrayOrPropertyArrayElement(
TNode<UnionT<FixedArray, PropertyArray>> array, TNode<TIndex> index,
@ -3923,6 +3951,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Number>* var_result,
Label* if_bailout);
void AssertHasValidMap(TNode<HeapObject> object);
template <typename TValue>
void EmitElementStoreTypedArray(TNode<JSTypedArray> typed_array,
TNode<IntPtrT> key, TNode<Object> value,


@ -55,6 +55,8 @@ const char* MachineReprToString(MachineRepresentation rep) {
return "kRepCompressedPointer";
case MachineRepresentation::kCompressed:
return "kRepCompressed";
case MachineRepresentation::kMapWord:
return "kRepMapWord";
}
UNREACHABLE();
}


@ -22,6 +22,19 @@ enum class MachineRepresentation : uint8_t {
kWord16,
kWord32,
kWord64,
// (uncompressed) MapWord
// kMapWord is the representation of a map word, i.e. a map in the header
// of a HeapObject.
// If V8_MAP_PACKING is disabled, a map word is just the map itself. Hence
// kMapWord is equivalent to kTaggedPointer -- in fact it will be
// translated to kTaggedPointer during memory lowering.
// If V8_MAP_PACKING is enabled, a map word is a Smi-like encoding of a map
// and some metadata. Memory lowering of kMapWord loads/stores
// produces low-level kTagged loads/stores plus the necessary
// decode/encode operations.
// In either case, the kMapWord representation is not used after memory
// lowering.
kMapWord,
kTaggedSigned, // (uncompressed) Smi
kTaggedPointer, // (uncompressed) HeapObject
kTagged, // (uncompressed) Object (Smi or HeapObject)
@ -102,6 +115,10 @@ class MachineType {
return representation() == MachineRepresentation::kNone;
}
constexpr bool IsMapWord() const {
return representation() == MachineRepresentation::kMapWord;
}
constexpr bool IsSigned() const {
return semantic() == MachineSemantic::kInt32 ||
semantic() == MachineSemantic::kInt64;
@ -187,6 +204,9 @@ class MachineType {
return MachineType(MachineRepresentation::kTaggedPointer,
MachineSemantic::kAny);
}
constexpr static MachineType MapInHeader() {
return MachineType(MachineRepresentation::kMapWord, MachineSemantic::kAny);
}
constexpr static MachineType TaggedSigned() {
return MachineType(MachineRepresentation::kTaggedSigned,
MachineSemantic::kInt32);
@ -283,7 +303,8 @@ inline bool IsFloatingPoint(MachineRepresentation rep) {
inline bool CanBeTaggedPointer(MachineRepresentation rep) {
return rep == MachineRepresentation::kTagged ||
rep == MachineRepresentation::kTaggedPointer;
rep == MachineRepresentation::kTaggedPointer ||
rep == MachineRepresentation::kMapWord;
}
inline bool CanBeTaggedSigned(MachineRepresentation rep) {
@ -328,6 +349,7 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeLog2Of(
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
case MachineRepresentation::kMapWord:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
return kTaggedSizeLog2;


@ -194,6 +194,9 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
void TurboAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
FieldOperand(object, HeapObject::kMapOffset));
#ifdef V8_MAP_PACKING
UnpackMapWord(destination);
#endif
}
void TurboAssembler::LoadTaggedPointerField(Register destination,
@ -205,6 +208,16 @@ void TurboAssembler::LoadTaggedPointerField(Register destination,
}
}
#ifdef V8_MAP_PACKING
void TurboAssembler::UnpackMapWord(Register r) {
// Clear the top two bytes (which may include metadata). Must be in sync with
// MapWord::Unpack, and vice versa.
shlq(r, Immediate(16));
shrq(r, Immediate(16));
xorq(r, Immediate(Internals::kMapWordXorMask));
}
#endif
void TurboAssembler::LoadTaggedSignedField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
@ -2692,7 +2705,11 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
AssertNotSmi(object);
Cmp(object, isolate()->factory()->undefined_value());
j(equal, &done_checking);
Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
Register map = object;
Push(object);
LoadMap(map, object);
Cmp(map, isolate()->factory()->allocation_site_map());
Pop(object);
Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}


@ -323,6 +323,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
j(less, dest);
}
#ifdef V8_MAP_PACKING
void UnpackMapWord(Register r);
#endif
void LoadMap(Register destination, Register object);
void Move(Register dst, Smi source);


@ -31,11 +31,11 @@ FieldAccess AccessBuilder::ForExternalIntPtr() {
}
// static
FieldAccess AccessBuilder::ForMap() {
FieldAccess AccessBuilder::ForMap(WriteBarrierKind write_barrier) {
FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TaggedPointer(),
kMapWriteBarrier};
Type::OtherInternal(), MachineType::MapInHeader(),
write_barrier};
return access;
}
@ -105,12 +105,12 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
}
// static
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map,
int index) {
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(
const MapRef& map, int index, MachineType machine_type) {
int const offset = map.GetInObjectPropertyOffset(index);
FieldAccess access = {kTaggedBase, offset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::NonInternal(), MachineType::AnyTagged(),
Type::NonInternal(), machine_type,
kFullWriteBarrier};
return access;
}


@ -31,7 +31,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Access to heap object fields and elements (based on tagged pointer).
// Provides access to HeapObject::map() field.
static FieldAccess ForMap();
static FieldAccess ForMap(WriteBarrierKind write_barrier = kMapWriteBarrier);
// Provides access to HeapNumber::value() field.
static FieldAccess ForHeapNumberValue();
@ -58,7 +58,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForJSObjectElements();
// Provides access to JSObject inobject property fields.
static FieldAccess ForJSObjectInObjectProperty(const MapRef& map, int index);
static FieldAccess ForJSObjectInObjectProperty(
const MapRef& map, int index,
MachineType machine_type = MachineType::AnyTagged());
static FieldAccess ForJSObjectOffset(
int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);


@ -626,6 +626,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@ -709,6 +710,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}


@ -827,6 +827,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64LdrQ;
immediate_mode = kNoImmediate;
break;
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@ -936,6 +937,7 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64StrQ;
immediate_mode = kNoImmediate;
break;
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}


@ -551,6 +551,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
@ -633,6 +634,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}


@ -247,6 +247,8 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
case MachineRepresentation::kCompressed:
os << "|c";
break;
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
return os << "]";
}
@ -914,6 +916,7 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kCompressed:
return rep;
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
break;
}


@ -545,6 +545,8 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kWord16:
case MachineRepresentation::kNone:
return false;
case MachineRepresentation::kMapWord:
break;
}
UNREACHABLE();
}


@ -373,6 +373,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
@ -1433,6 +1434,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}


@ -513,6 +513,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
@ -1854,6 +1855,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}


@ -225,6 +225,7 @@ void InstructionSelector::VisitLoad(Node* node) {
mode = kNoImmediate;
break;
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}


@ -65,6 +65,7 @@ inline int ByteWidthForStackSlot(MachineRepresentation rep) {
case MachineRepresentation::kSimd128:
return kSimd128Size;
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
break;
}
UNREACHABLE();


@ -298,6 +298,7 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64Movdqu;
break;
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
return opcode;
@ -332,6 +333,7 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
case MachineRepresentation::kSimd128: // Fall through.
return kX64Movdqu;
case MachineRepresentation::kNone:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
UNREACHABLE();
@ -465,6 +467,7 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(!load_rep.IsMapWord());
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
@ -479,6 +482,7 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
if (FLAG_enable_unconditional_write_barriers &&


@ -331,6 +331,10 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) {
return UncheckedCast<Float64T>(jsgraph()->Float64Constant(value));
}
bool CodeAssembler::IsMapOffsetConstant(Node* node) {
return raw_assembler()->IsMapOffsetConstant(node);
}
bool CodeAssembler::TryToInt32Constant(TNode<IntegralT> node,
int32_t* out_value) {
{
@ -689,11 +693,15 @@ TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
LoadSensitivity needs_poisoning) {
// Please use LoadFromObject(MachineType::MapInHeader(), object,
// IntPtrConstant(-kHeapObjectTag)) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
}
Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
return raw_assembler()->AtomicLoad(type, base, offset);
}
@ -713,6 +721,27 @@ Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
return raw_assembler()->LoadFromObject(type, object, offset);
}
#ifdef V8_MAP_PACKING
Node* CodeAssembler::PackMapWord(Node* value) {
TNode<IntPtrT> map_word =
BitcastTaggedToWordForTagAndSmiBits(UncheckedCast<AnyTaggedT>(value));
TNode<WordT> packed = WordXor(UncheckedCast<WordT>(map_word),
IntPtrConstant(Internals::kMapWordXorMask));
return BitcastWordToTaggedSigned(packed);
}
#endif
TNode<AnyTaggedT> CodeAssembler::LoadRootMapWord(RootIndex root_index) {
#ifdef V8_MAP_PACKING
Handle<Object> root = isolate()->root_handle(root_index);
Node* map = HeapConstant(Handle<Map>::cast(root));
map = PackMapWord(map);
return ReinterpretCast<AnyTaggedT>(map);
#else
return LoadRoot(root_index);
#endif
}
TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
if (RootsTable::IsImmortalImmovable(root_index)) {
Handle<Object> root = isolate()->root_handle(root_index);
@ -794,11 +823,14 @@ void CodeAssembler::OptimizedStoreMap(TNode<HeapObject> object,
}
void CodeAssembler::Store(Node* base, Node* offset, Node* value) {
// Please use OptimizedStoreMap(base, value) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(MachineRepresentation::kTagged, base, offset, value,
kFullWriteBarrier);
}
void CodeAssembler::StoreEphemeronKey(Node* base, Node* offset, Node* value) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(MachineRepresentation::kTagged, base, offset, value,
kEphemeronKeyWriteBarrier);
}
@ -812,6 +844,8 @@ void CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
void CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* offset, Node* value) {
// Please use OptimizedStoreMap(base, value) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(
rep, base, offset, value,
CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
@ -825,6 +859,8 @@ void CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
void CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
Node* base, Node* offset,
Node* value) {
// Please use OptimizedStoreMap(base, value) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
@ -837,12 +873,15 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
TNode<IntPtrT> offset,
TNode<Object> tagged_value) {
// Please use OptimizedStoreMap(base, tagged_value) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
StoreNoWriteBarrier(MachineType::PointerRepresentation(), base, offset,
BitcastTaggedToWord(tagged_value));
}
void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->AtomicStore(rep, base, offset, value);
}


@ -585,6 +585,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return value ? Int32TrueConstant() : Int32FalseConstant();
}
bool IsMapOffsetConstant(Node* node);
bool TryToInt32Constant(TNode<IntegralT> node, int32_t* out_value);
bool TryToInt64Constant(TNode<IntegralT> node, int64_t* out_value);
bool TryToIntPtrConstant(TNode<IntegralT> node, intptr_t* out_value);
@ -789,8 +791,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset);
#ifdef V8_MAP_PACKING
Node* PackMapWord(Node* value);
#endif
// Load a value from the root array.
// If map packing is enabled, LoadRoot for a root map returns the unpacked map
// word (i.e., the map). Use LoadRootMapWord to obtain the packed map word
// instead.
TNode<Object> LoadRoot(RootIndex root_index);
TNode<AnyTaggedT> LoadRootMapWord(RootIndex root_index);
template <typename Type>
TNode<Type> UnalignedLoad(TNode<RawPtrT> base, TNode<IntPtrT> offset) {


@ -5,6 +5,7 @@
#include "src/compiler/graph-assembler.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
// For TNode types.
@ -530,6 +531,31 @@ Node* JSGraphAssembler::StoreField(FieldAccess const& access, Node* object,
value, effect(), control()));
}
#ifdef V8_MAP_PACKING
TNode<Map> GraphAssembler::UnpackMapWord(Node* map_word) {
map_word = BitcastTaggedToWordForTagAndSmiBits(map_word);
// TODO(wenyuzhao): Clear header metadata.
Node* map = WordXor(map_word, IntPtrConstant(Internals::kMapWordXorMask));
return TNode<Map>::UncheckedCast(BitcastWordToTagged(map));
}
Node* GraphAssembler::PackMapWord(TNode<Map> map) {
Node* map_word = BitcastTaggedToWordForTagAndSmiBits(map);
Node* packed = WordXor(map_word, IntPtrConstant(Internals::kMapWordXorMask));
return BitcastWordToTaggedSigned(packed);
}
#endif
TNode<Map> GraphAssembler::LoadMap(Node* object) {
Node* map_word = Load(MachineType::TaggedPointer(), object,
HeapObject::kMapOffset - kHeapObjectTag);
#ifdef V8_MAP_PACKING
return UnpackMapWord(map_word);
#else
return TNode<Map>::UncheckedCast(map_word);
#endif
}
Node* JSGraphAssembler::StoreElement(ElementAccess const& access, Node* object,
Node* index, Node* value) {
return AddNode(graph()->NewNode(simplified()->StoreElement(access), object,


@ -270,6 +270,12 @@ class V8_EXPORT_PRIVATE GraphAssembler {
CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
#undef BINOP_DECL
#ifdef V8_MAP_PACKING
Node* PackMapWord(TNode<Map> map);
TNode<Map> UnpackMapWord(Node* map_word);
#endif
TNode<Map> LoadMap(Node* object);
Node* DebugBreak();
// Unreachable nodes are similar to Goto in that they reset effect/control to


@ -1712,6 +1712,9 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
int const boilerplate_length = boilerplate_map.GetInObjectProperties();
for (int index = static_cast<int>(inobject_fields.size());
index < boilerplate_length; ++index) {
DCHECK(!V8_MAP_PACKING_BOOL);
// TODO(wenyuzhao): Fix incorrect MachineType when V8_MAP_PACKING is
// enabled.
FieldAccess access =
AccessBuilder::ForJSObjectInObjectProperty(boilerplate_map, index);
Node* value = jsgraph()->HeapConstant(factory()->one_pointer_filler_map());


@ -2606,6 +2606,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
case MachineRepresentation::kMapWord:
UNREACHABLE();
break;
}


@ -1076,6 +1076,7 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
case MachineRepresentation::kMapWord:
if (Node* replacement = state->LookupElement(
object, index, access.machine_type.representation())) {
// Make sure we don't resurrect dead {replacement} nodes.
@ -1131,6 +1132,7 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
case MachineRepresentation::kMapWord:
state = state->AddElement(object, index, new_value,
access.machine_type.representation(), zone());
break;
@ -1424,6 +1426,7 @@ LoadElimination::IndexRange LoadElimination::FieldIndexOf(
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
case MachineRepresentation::kMapWord:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
break;


@ -999,6 +999,7 @@ class MachineRepresentationChecker {
return IsAnyTagged(actual);
case MachineRepresentation::kCompressed:
return IsAnyCompressed(actual);
case MachineRepresentation::kMapWord:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
// TODO(turbofan): At the moment, the machine graph doesn't contain


@ -610,6 +610,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(Pointer) \
V(TaggedSigned) \
V(TaggedPointer) \
V(MapInHeader) \
V(AnyTagged) \
V(CompressedPointer) \
V(AnyCompressed)
@ -622,6 +623,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(kWord16) \
V(kWord32) \
V(kWord64) \
V(kMapWord) \
V(kTaggedSigned) \
V(kTaggedPointer) \
V(kTagged) \
@ -1332,6 +1334,7 @@ OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
DCHECK(!rep.IsMapWord());
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kLoad##Type; \
@ -1491,6 +1494,7 @@ const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
}
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
switch (store_rep.representation()) {
#define STORE(kRep) \
case MachineRepresentation::kRep: \


@ -104,7 +104,8 @@ V8_EXPORT_PRIVATE LoadLaneParameters const& LoadLaneParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier.
// correct write barrier, and needs to state whether it is storing into the
// header word, so that the value can be packed, if necessary.
class StoreRepresentation final {
public:
StoreRepresentation(MachineRepresentation representation,


@ -6,6 +6,7 @@
#include "src/codegen/interface-descriptors.h"
#include "src/common/external-pointer.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@ -292,12 +293,20 @@ Reduction MemoryLowering::ReduceAllocateRaw(
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
MachineRepresentation rep = access.machine_type.representation();
const Operator* load_op = ElementSizeInBytes(rep) > kTaggedSize &&
!machine()->UnalignedLoadSupported(
access.machine_type.representation())
? machine()->UnalignedLoad(access.machine_type)
: machine()->Load(access.machine_type);
MachineType machine_type = access.machine_type;
if (machine_type.IsMapWord()) {
CHECK_EQ(machine_type.semantic(), MachineSemantic::kAny);
return ReduceLoadMap(node);
}
MachineRepresentation rep = machine_type.representation();
const Operator* load_op =
ElementSizeInBytes(rep) > kTaggedSize &&
!machine()->UnalignedLoadSupported(machine_type.representation())
? machine()->UnalignedLoad(machine_type)
: machine()->Load(machine_type);
NodeProperties::ChangeOp(node, load_op);
return Changed(node);
}
@ -308,6 +317,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
DCHECK(!type.IsMapWord());
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
@ -354,6 +364,22 @@ Node* MemoryLowering::DecodeExternalPointer(
#endif // V8_HEAP_SANDBOX
}
Reduction MemoryLowering::ReduceLoadMap(Node* node) {
#ifdef V8_MAP_PACKING
NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
node = __ AddNode(graph()->CloneNode(node));
return Replace(__ UnpackMapWord(node));
#else
NodeProperties::ChangeOp(node, machine()->Load(MachineType::TaggedPointer()));
return Changed(node);
#endif
}
Reduction MemoryLowering::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
@ -365,11 +391,19 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
// External pointer table indices are 32bit numbers
type = MachineType::Uint32();
}
if (type.IsMapWord()) {
DCHECK(!NeedsPoisoning(access.load_sensitivity));
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
return ReduceLoadMap(node);
}
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
#ifdef V8_HEAP_SANDBOX
@ -391,8 +425,10 @@ Reduction MemoryLowering::ReduceStoreToObject(Node* node,
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
DCHECK(!access.machine_type.IsMapWord());
MachineRepresentation rep = access.machine_type.representation();
StoreRepresentation store_rep(rep, write_barrier_kind);
const Operator* store_op = ElementSizeInBytes(rep) > kTaggedSize &&
@ -427,15 +463,29 @@ Reduction MemoryLowering::ReduceStoreField(Node* node,
DCHECK_IMPLIES(V8_HEAP_SANDBOX_BOOL,
!access.type.Is(Type::ExternalPointer()) &&
!access.type.Is(Type::SandboxedExternalPointer()));
MachineType machine_type = access.machine_type;
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
Node* offset = __ IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph_zone(), 1, offset);
if (machine_type.IsMapWord()) {
machine_type = MachineType::TaggedPointer();
#ifdef V8_MAP_PACKING
Node* mapword = __ PackMapWord(TNode<Map>::UncheckedCast(value));
node->ReplaceInput(2, mapword);
#endif
}
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
node, machine()->Store(StoreRepresentation(machine_type.representation(),
write_barrier_kind)));
return Changed(node);
}


@ -110,6 +110,7 @@ class MemoryLowering final : public Reducer {
AllocationState const* state,
WriteBarrierKind);
Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;


@ -257,6 +257,15 @@ bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
return false;
}
void MemoryOptimizer::ReplaceUsesAndKillNode(Node* node, Node* replacement) {
// Replace all uses of node and kill the node to make sure we don't leave
// dangling dead uses.
DCHECK_NE(replacement, node);
NodeProperties::ReplaceUses(node, replacement, graph_assembler_.effect(),
graph_assembler_.control());
node->Kill();
}
void MemoryOptimizer::VisitAllocateRaw(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
@ -294,12 +303,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
node, allocation_type, allocation.allow_large_objects(), &state);
CHECK(reduction.Changed() && reduction.replacement() != node);
// Replace all uses of node and kill the node to make sure we don't leave
// dangling dead uses.
NodeProperties::ReplaceUses(node, reduction.replacement(),
graph_assembler_.effect(),
graph_assembler_.control());
node->Kill();
ReplaceUsesAndKillNode(node, reduction.replacement());
EnqueueUses(state->effect(), state);
}
@ -307,8 +311,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
memory_lowering()->ReduceLoadFromObject(node);
Reduction reduction = memory_lowering()->ReduceLoadFromObject(node);
EnqueueUses(node, state);
if (V8_MAP_PACKING_BOOL && reduction.replacement() != node) {
ReplaceUsesAndKillNode(node, reduction.replacement());
}
}
void MemoryOptimizer::VisitStoreToObject(Node* node,
@ -333,16 +340,14 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
// lowering, so we can proceed iterating the graph from the node uses.
EnqueueUses(node, state);
// Node can be replaced only when V8_HEAP_SANDBOX_BOOL is enabled and
// when loading an external pointer value.
DCHECK_IMPLIES(!V8_HEAP_SANDBOX_BOOL, reduction.replacement() == node);
if (V8_HEAP_SANDBOX_BOOL && reduction.replacement() != node) {
// Replace all uses of node and kill the node to make sure we don't leave
// dangling dead uses.
NodeProperties::ReplaceUses(node, reduction.replacement(),
graph_assembler_.effect(),
graph_assembler_.control());
node->Kill();
// Node can be replaced in two cases:
// 1. V8_HEAP_SANDBOX_BOOL is enabled and loading an external pointer value.
// 2. V8_MAP_PACKING_BOOL is enabled.
DCHECK_IMPLIES(!V8_HEAP_SANDBOX_BOOL && !V8_MAP_PACKING_BOOL,
reduction.replacement() == node);
if ((V8_HEAP_SANDBOX_BOOL || V8_MAP_PACKING_BOOL) &&
reduction.replacement() != node) {
ReplaceUsesAndKillNode(node, reduction.replacement());
}
}


@ -68,6 +68,8 @@ class MemoryOptimizer final {
void EnqueueUses(Node*, AllocationState const*);
void EnqueueUse(Node*, int, AllocationState const*);
void ReplaceUsesAndKillNode(Node* node, Node* replacement);
// Returns true if the AllocationType of the current AllocateRaw node that we
// are visiting needs to be updated to kOld, due to propagation of tenuring
// from outer to inner allocations.


@ -15,6 +15,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/simplified-operator.h"
@ -147,10 +148,31 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* load = AddNode(op, base, index);
return load;
}
bool IsMapOffsetConstant(Node* node) {
Int64Matcher m(node);
if (m.Is(HeapObject::kMapOffset)) return true;
// Test if `node` is a `Phi(Int64Constant(0))`
if (node->opcode() == IrOpcode::kPhi) {
for (Node* input : node->inputs()) {
if (!Int64Matcher(input).Is(HeapObject::kMapOffset)) return false;
}
return true;
}
return false;
}
bool IsMapOffsetConstantMinusTag(Node* node) {
Int64Matcher m(node);
return m.Is(HeapObject::kMapOffset - kHeapObjectTag);
}
bool IsMapOffsetConstantMinusTag(int offset) {
return offset == HeapObject::kMapOffset - kHeapObjectTag;
}
Node* LoadFromObject(
MachineType type, Node* base, Node* offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe);
DCHECK_IMPLIES(IsMapOffsetConstantMinusTag(offset),
type == MachineType::MapInHeader());
ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
Node* load = AddNode(simplified()->LoadFromObject(access), base, offset);
return load;
@ -169,18 +191,22 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* value, WriteBarrierKind write_barrier) {
ObjectAccess access = {MachineType::TypeForRepresentation(rep),
write_barrier};
DCHECK(!IsMapOffsetConstantMinusTag(offset));
AddNode(simplified()->StoreToObject(access), object, offset, value);
}
void OptimizedStoreField(MachineRepresentation rep, Node* object, int offset,
Node* value, WriteBarrierKind write_barrier) {
DCHECK(!IsMapOffsetConstantMinusTag(offset));
AddNode(simplified()->StoreField(FieldAccess(
BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(),
MaybeHandle<Map>(), Type::Any(),
MachineType::TypeForRepresentation(rep), write_barrier)),
object, value);
}
void OptimizedStoreMap(Node* object, Node* value) {
AddNode(simplified()->StoreField(AccessBuilder::ForMap()), object, value);
void OptimizedStoreMap(Node* object, Node* value,
WriteBarrierKind write_barrier = kMapWriteBarrier) {
AddNode(simplified()->StoreField(AccessBuilder::ForMap(write_barrier)),
object, value);
}
Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
@ -245,6 +271,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
DCHECK(!IsMapOffsetConstantMinusTag(index));
DCHECK_NE(rep, MachineRepresentation::kWord64);
return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
}


@ -234,6 +234,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return node;
case MachineRepresentation::kCompressed:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kMapWord:
UNREACHABLE();
}
UNREACHABLE();
@ -507,7 +508,8 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
break;
}
if (output_rep == MachineRepresentation::kTaggedSigned ||
output_rep == MachineRepresentation::kTaggedPointer) {
output_rep == MachineRepresentation::kTaggedPointer ||
output_rep == MachineRepresentation::kMapWord) {
// this is a no-op.
return node;
}


@ -142,6 +142,7 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
return UseInfo::TaggedSigned();
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
case MachineRepresentation::kMapWord:
return UseInfo::AnyTagged();
case MachineRepresentation::kFloat64:
return UseInfo::TruncatingFloat64();


@ -127,6 +127,13 @@ struct FieldAccess {
#endif
{
DCHECK_GE(offset, 0);
DCHECK_IMPLIES(
machine_type.IsMapWord(),
offset == HeapObject::kMapOffset && base_is_tagged != kUntaggedBase);
DCHECK_IMPLIES(machine_type.IsMapWord(),
(write_barrier_kind == kMapWriteBarrier ||
write_barrier_kind == kNoWriteBarrier ||
write_barrier_kind == kAssertNoWriteBarrier));
}
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }


@ -291,10 +291,6 @@ class WasmGraphAssembler : public GraphAssembler {
// Maps and their contents.
Node* LoadMap(Node* heap_object) {
return LoadFromObject(MachineType::TaggedPointer(), heap_object,
wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
}
Node* LoadInstanceType(Node* map) {
return LoadFromObject(
MachineType::Uint16(), map,
@ -6682,9 +6678,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kF64: {
auto done = gasm_->MakeLabel();
gasm_->GotoIf(IsSmi(input), &done);
Node* map = gasm_->LoadFromObject(
MachineType::TaggedPointer(), input,
wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
Node* map = gasm_->LoadMap(input);
Node* heap_number_map = LOAD_ROOT(HeapNumberMap, heap_number_map);
Node* is_heap_number = gasm_->WordEqual(heap_number_map, map);
gasm_->GotoIf(is_heap_number, &done);


@ -137,7 +137,7 @@ void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p) {
if (p->GetHeapObject(&heap_object)) {
HeapObject::VerifyHeapPointer(isolate, heap_object);
} else {
CHECK(p->IsSmi() || p->IsCleared());
CHECK(p->IsSmi() || p->IsCleared() || MapWord::IsPacked(p->ptr()));
}
}
@ -828,7 +828,6 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
// TODO(leszeks): Add a TorqueGeneratedClassVerifier for LocalIsolate.
TorqueGeneratedClassVerifiers::SharedFunctionInfoVerify(*this, isolate);
this->SharedFunctionInfoVerify(ReadOnlyRoots(isolate));
}


@ -151,6 +151,12 @@ struct MaybeBoolFlag {
#define COMPRESS_POINTERS_BOOL false
#endif
#ifdef V8_MAP_PACKING
#define V8_MAP_PACKING_BOOL true
#else
#define V8_MAP_PACKING_BOOL false
#endif
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
#define COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL true
#else


@ -2218,13 +2218,9 @@ void Factory::InitializeJSObjectBody(JSObject obj, Map map, int start_offset) {
// In case of Array subclassing the |map| could already be transitioned
// to different elements kind from the initial map on which we track slack.
bool in_progress = map.IsInobjectSlackTrackingInProgress();
Object filler;
if (in_progress) {
filler = *one_pointer_filler_map();
} else {
filler = *undefined_value();
}
obj.InitializeBody(map, start_offset, *undefined_value(), filler);
obj.InitializeBody(map, start_offset, in_progress,
ReadOnlyRoots(isolate()).one_pointer_filler_map_word(),
*undefined_value());
if (in_progress) {
map.FindRootMap(isolate()).InobjectSlackTrackingStep(isolate());
}


@ -91,10 +91,10 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
FreeSpace n = top();
while (!n.is_null()) {
ObjectSlot map_slot = n.map_slot();
if (map_slot.contains_value(kNullAddress)) {
map_slot.store(free_space_map);
if (map_slot.contains_map_value(kNullAddress)) {
map_slot.store_map(free_space_map);
} else {
DCHECK(map_slot.contains_value(free_space_map.ptr()));
DCHECK(map_slot.contains_map_value(free_space_map.ptr()));
}
n = n.next();
}
@ -504,11 +504,12 @@ size_t FreeListCategory::SumFreeList() {
while (!cur.is_null()) {
// We can't use "cur->map()" here because both cur's map and the
// root can be null during bootstrapping.
DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
->heap()
->isolate()
->root(RootIndex::kFreeSpaceMap)
.ptr()));
DCHECK(
cur.map_slot().contains_map_value(Page::FromHeapObject(cur)
->heap()
->isolate()
->root(RootIndex::kFreeSpaceMap)
.ptr()));
sum += cur.relaxed_read_size();
cur = cur.next();
}


@ -502,7 +502,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
if (!candidate_map_slot.contains_value(
if (!candidate_map_slot.contains_map_value(
ReadOnlyRoots(this).allocation_memento_map().ptr())) {
return AllocationMemento();
}


@ -68,7 +68,16 @@ void WriteBarrier::MarkingSlow(Heap* heap, DescriptorArray descriptor_array,
int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
HeapObject host = HeapObject::cast(Object(raw_host));
MaybeObjectSlot slot(raw_slot);
WriteBarrier::Marking(host, slot, *slot);
Address value = (*slot).ptr();
#ifdef V8_MAP_PACKING
if (slot.address() == host.address()) {
// Clear metadata bits and fix object tag.
value = (value & ~Internals::kMapWordMetadataMask &
~Internals::kMapWordXorMask) |
(uint64_t)kHeapObjectTag;
}
#endif
WriteBarrier::Marking(host, slot, MaybeObject(value));
// Called by RecordWriteCodeStubAssembler, which doesn't accept void type
return 0;
}


@ -2949,7 +2949,7 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
DCHECK((filler.map_slot().contains_value(kNullAddress) &&
DCHECK((filler.map_slot().contains_map_value(kNullAddress) &&
!Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
filler.map().IsMap());
@ -3170,7 +3170,8 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
RELAXED_WRITE_FIELD(object, bytes_to_trim,
Object(MapWord::FromMap(map).ptr()));
RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
Smi::FromInt(len - elements_to_trim));
@ -3568,6 +3569,8 @@ class SlotCollectingVisitor final : public ObjectVisitor {
UNREACHABLE();
}
void VisitMapPointer(HeapObject object) override {} // do nothing by default
int number_of_slots() { return static_cast<int>(slots_.size()); }
MaybeObjectSlot slot(int i) { return slots_[i]; }
@ -4133,7 +4136,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
ObjectSlot end) override {
#ifdef DEBUG
for (ObjectSlot slot = start; slot < end; ++slot) {
DCHECK(!HasWeakHeapObjectTag(*slot));
DCHECK(!MapWord::IsPacked((*slot).ptr()) || !HasWeakHeapObjectTag(*slot));
}
#endif // DEBUG
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
@ -4383,7 +4386,9 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
for (FullObjectSlot p = start; p < end; ++p) {
FixHandle(p);
}
}
private:
@ -5929,6 +5934,9 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
: filter_(filter) {}
void VisitMapPointer(HeapObject object) override {
MarkHeapObject(Map::unchecked_cast(object.map()));
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
@ -6437,7 +6445,8 @@ void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
} else {
CHECK(object.IsSmi() || object.IsCleared());
CHECK(object.IsSmi() || object.IsCleared() ||
MapWord::IsPacked(object.ptr()));
}
}
}

View File

@ -107,18 +107,23 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
DCHECK(!MapWord::IsPacked((*p).ptr()));
MarkObjectByPointer(p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
for (FullObjectSlot p = start; p < end; ++p) {
DCHECK(!MapWord::IsPacked((*p).ptr()));
MarkObjectByPointer(p);
}
}
private:
void MarkObjectByPointer(FullObjectSlot p) {
Object obj = *p;
if (!obj.IsHeapObject()) return;
DCHECK(!MapWord::IsPacked(obj.ptr()));
heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
}

View File

@ -205,9 +205,10 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
Object map_object = ObjectSlot(addr).Acquire_Load();
Object map_object = black_object.synchronized_map();
CHECK(map_object.IsMap());
map = Map::cast(map_object);
DCHECK(map.IsMap());
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
@ -235,10 +236,11 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
object = black_object;
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
Object map_object = ObjectSlot(addr).Acquire_Load();
object = HeapObject::FromAddress(addr);
Object map_object = object.synchronized_map();
CHECK(map_object.IsMap());
map = Map::cast(map_object);
object = HeapObject::FromAddress(addr);
DCHECK(map.IsMap());
size = object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
}

View File

@ -84,6 +84,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) = 0;
virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@ -107,6 +108,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyRootPointers(start, end);
}
void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
@ -210,6 +213,8 @@ class FullMarkingVerifier : public MarkingVerifier {
return marking_state_->IsBlackOrGrey(object);
}
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@ -273,11 +278,14 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyRootPointers(start, end);
}
void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
protected:
explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
inline Heap* heap() { return heap_; }
virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@ -356,7 +364,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
}
}
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@ -956,12 +964,15 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
MarkObjectByPointer(root, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
for (FullObjectSlot p = start; p < end; ++p) {
MarkObjectByPointer(root, p);
}
}
private:
@ -993,8 +1004,12 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkObject(host, *p);
}
void VisitMapPointer(HeapObject host) final { MarkObject(host, host.map()); }
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
for (ObjectSlot p = start; p < end; ++p) {
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
DCHECK(!HasWeakHeapObjectTag(*p));
MarkObject(host, *p);
}
@ -1147,6 +1162,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
RecordMigratedSlot(host, *p, p.address());
}
@ -2761,6 +2777,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
UpdateRootSlotInternal(cage_base_, p);
}
@ -4144,6 +4161,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
protected:
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@ -4212,7 +4231,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
}
}
void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@ -4481,6 +4500,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
for (FullObjectSlot p = start; p < end; ++p) {
DCHECK(!MapWord::IsPacked((*p).ptr()));
MarkObjectByPointer(p);
}
}

View File

@ -355,7 +355,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
DescriptorArray descriptors) {
concrete_visitor()->marking_state()->WhiteToGrey(descriptors);
if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) {
VisitPointer(descriptors, descriptors.map_slot());
VisitMapPointer(descriptors);
VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
descriptors.GetDescriptorSlot(0));
return DescriptorArray::BodyDescriptor::SizeOf(descriptors.map(),

View File

@ -133,6 +133,11 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE int VisitWeakCell(Map map, WeakCell object);
// ObjectVisitor overrides.
V8_INLINE void VisitMapPointer(HeapObject host) final {
// Note that we are skipping recording the slot because map objects
// can't move, so this is safe (see ProcessStrongHeapObject for comparison).
MarkObject(host, HeapObject::cast(host.map()));
}
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}

View File

@ -77,7 +77,8 @@ template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
HeapObject host) {
DCHECK(!host.map_word().IsForwardingAddress());
static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
if (!static_cast<ConcreteVisitor*>(this)->ShouldVisitMapPointer()) return;
static_cast<ConcreteVisitor*>(this)->VisitMapPointer(host);
}
#define VISIT(TypeName) \

View File

@ -78,6 +78,8 @@ class HeapVisitor : public ObjectVisitor {
public:
V8_INLINE ResultType Visit(HeapObject object);
V8_INLINE ResultType Visit(Map map, HeapObject object);
// A callback for visiting the map pointer in the object header.
V8_INLINE void VisitMapPointer(HeapObject host);
protected:
// A guard predicate for visiting the object.
@ -86,8 +88,6 @@ class HeapVisitor : public ObjectVisitor {
V8_INLINE bool ShouldVisit(HeapObject object) { return true; }
// Guard predicate for visiting the object's map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
V8_INLINE void VisitMapPointer(HeapObject host);
// If this predicate returns false, then the heap visitor will fail
// in the default Visit implementation for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }

View File

@ -734,6 +734,7 @@ void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
FullObjectSlot p) {
DCHECK(!HasWeakHeapObjectTag(*p));
DCHECK(!MapWord::IsPacked((*p).ptr()));
ScavengePointer(p);
}
@ -741,12 +742,15 @@ void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
FullObjectSlot start,
FullObjectSlot end) {
// Copy all HeapObject pointers in [start, end)
for (FullObjectSlot p = start; p < end; ++p) ScavengePointer(p);
for (FullObjectSlot p = start; p < end; ++p) {
ScavengePointer(p);
}
}
void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
Object object = *p;
DCHECK(!HasWeakHeapObjectTag(object));
DCHECK(!MapWord::IsPacked(object.ptr()));
if (Heap::InYoungGeneration(object)) {
scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
}

View File

@ -394,6 +394,7 @@ int Sweeper::RawSweep(
&old_to_new_cleanup);
}
Map map = object.synchronized_map();
DCHECK(map.IsMap());
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;

View File

@ -28,6 +28,13 @@ bool CompressedObjectSlot::contains_value(Address raw_value) const {
static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
}
bool CompressedObjectSlot::contains_map_value(Address raw_value) const {
// Simply forward to contains_value because map packing is not supported with
// pointer compression.
DCHECK(!V8_MAP_PACKING_BOOL);
return contains_value(raw_value);
}
Object CompressedObjectSlot::operator*() const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(address(), value));
@ -42,6 +49,20 @@ void CompressedObjectSlot::store(Object value) const {
*location() = CompressTagged(value.ptr());
}
void CompressedObjectSlot::store_map(Map map) const {
// Simply forward to store because map packing is not supported with pointer
// compression.
DCHECK(!V8_MAP_PACKING_BOOL);
store(map);
}
Map CompressedObjectSlot::load_map() const {
// Simply forward to Relaxed_Load because map packing is not supported with
// pointer compression.
DCHECK(!V8_MAP_PACKING_BOOL);
return Map::unchecked_cast(Relaxed_Load());
}
Object CompressedObjectSlot::Acquire_Load() const {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
return Object(DecompressTaggedAny(address(), value));

View File

@ -37,12 +37,16 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
// Compares memory representation of a value stored in the slot with given
// raw value without decompression.
inline bool contains_value(Address raw_value) const;
inline bool contains_map_value(Address raw_value) const;
// TODO(leszeks): Consider deprecating the operator* load, and always pass the
// Isolate.
inline Object operator*() const;
inline Object load(PtrComprCageBase cage_base) const;
inline void store(Object value) const;
inline void store_map(Map map) const;
inline Map load_map() const;
inline Object Acquire_Load() const;
inline Object Relaxed_Load() const;

View File

@ -50,9 +50,9 @@ bool FreeSpace::IsValid() {
Heap* heap = GetHeapFromWritableObject(*this);
Object free_space_map =
Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
CHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
CHECK_IMPLIES(!map_slot().contains_map_value(free_space_map.ptr()),
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
map_slot().contains_map_value(kNullAddress));
CHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
return true;
}

View File

@ -401,25 +401,31 @@ Object JSObject::InObjectPropertyAtPut(int index, Object value,
}
void JSObject::InitializeBody(Map map, int start_offset,
Object pre_allocated_value, Object filler_value) {
DCHECK_IMPLIES(filler_value.IsHeapObject(),
!ObjectInYoungGeneration(filler_value));
DCHECK_IMPLIES(pre_allocated_value.IsHeapObject(),
!ObjectInYoungGeneration(pre_allocated_value));
bool is_slack_tracking_in_progress,
MapWord filler_map, Object undefined_filler) {
int size = map.instance_size();
int offset = start_offset;
if (filler_value != pre_allocated_value) {
if (is_slack_tracking_in_progress) {
int end_of_pre_allocated_offset =
size - (map.UnusedPropertyFields() * kTaggedSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
// fill start with references to the undefined value object
while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(*this, offset, pre_allocated_value);
WRITE_FIELD(*this, offset, undefined_filler);
offset += kTaggedSize;
}
// fill the remainder with one-word filler objects (i.e. just a map word)
while (offset < size) {
Object fm = Object(filler_map.ptr());
WRITE_FIELD(*this, offset, fm);
offset += kTaggedSize;
}
} else {
while (offset < size) {
// fill with references to the undefined value object
WRITE_FIELD(*this, offset, undefined_filler);
offset += kTaggedSize;
}
}
while (offset < size) {
WRITE_FIELD(*this, offset, filler_value);
offset += kTaggedSize;
}
}
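
To make the two-phase fill concrete, here is a rough standalone sketch (not part of this CL; slot counts and sentinel values are invented) of the layout InitializeBody produces while slack tracking is in progress: the pre-allocated slots reference undefined, and the unused slack at the end is turned into one-word fillers, i.e. bare (packed) map words, so the heap stays iterable if slack tracking completes concurrently.

// Sketch of the fill pattern; nothing below is real V8 code or data.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  constexpr uint64_t kUndefinedValue = 0xAAAA;       // stands in for the undefined value
  constexpr uint64_t kPackedFillerMapWord = 0xBBBB;  // stands in for the packed filler map word
  const int total_slots = 6;   // assumed in-object property slots
  const int unused_slots = 2;  // plays the role of map.UnusedPropertyFields()
  std::vector<uint64_t> fields(total_slots);

  // Pre-allocated part: references to the undefined value.
  for (int i = 0; i < total_slots - unused_slots; i++) fields[i] = kUndefinedValue;
  // Remaining slack: one-word filler objects, i.e. just a map word each.
  for (int i = total_slots - unused_slots; i < total_slots; i++)
    fields[i] = kPackedFillerMapWord;

  for (uint64_t v : fields) std::printf("%llx ", static_cast<unsigned long long>(v));
  std::printf("\n");  // prints: aaaa aaaa aaaa aaaa bbbb bbbb
}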

View File

@ -680,11 +680,12 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// Initializes the body starting at |start_offset|. It is responsibility of
// the caller to initialize object header. Fill the pre-allocated fields with
// pre_allocated_value and the rest with filler_value.
// undefined_value and the rest with filler_map.
// Note: this call does not update write barrier, the caller is responsible
// to ensure that |filler_value| can be collected without WB here.
// to ensure that |filler_map| can be collected without WB here.
inline void InitializeBody(Map map, int start_offset,
Object pre_allocated_value, Object filler_value);
bool is_slack_tracking_in_progress,
MapWord filler_map, Object undefined_value);
// Check whether this object references another object
bool ReferencesObject(Object obj);

View File

@ -103,12 +103,17 @@ DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(HeapObject obj,
int start_offset,
int end_offset,
ObjectVisitor* v) {
if (start_offset == HeapObject::kMapOffset) {
v->VisitMapPointer(obj);
start_offset += kTaggedSize;
}
v->VisitPointers(obj, obj.RawField(start_offset), obj.RawField(end_offset));
}
template <typename ObjectVisitor>
void BodyDescriptorBase::IteratePointer(HeapObject obj, int offset,
ObjectVisitor* v) {
DCHECK_NE(offset, HeapObject::kMapOffset);
v->VisitPointer(obj, obj.RawField(offset));
}
@ -122,6 +127,7 @@ DISABLE_CFI_PERF void BodyDescriptorBase::IterateMaybeWeakPointers(
template <typename ObjectVisitor>
void BodyDescriptorBase::IterateMaybeWeakPointer(HeapObject obj, int offset,
ObjectVisitor* v) {
DCHECK_NE(offset, HeapObject::kMapOffset);
v->VisitPointer(obj, obj.RawMaybeWeakField(offset));
}
@ -1134,7 +1140,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
template <typename ObjectVisitor>
void HeapObject::IterateFast(ObjectVisitor* v) {
BodyDescriptorBase::IteratePointer(*this, kMapOffset, v);
v->VisitMapPointer(*this);
IterateBodyFast(v);
}

View File

@ -649,11 +649,26 @@ MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
return MaybeObjectSlot(field_address(byte_offset));
}
MapWord MapWord::FromMap(const Map map) { return MapWord(map.ptr()); }
MapWord MapWord::FromMap(const Map map) {
DCHECK(map.is_null() || !MapWord::IsPacked(map.ptr()));
#ifdef V8_MAP_PACKING
return MapWord(Pack(map.ptr()));
#else
return MapWord(map.ptr());
#endif
}
Map MapWord::ToMap() const { return Map::unchecked_cast(Object(value_)); }
Map MapWord::ToMap() const {
#ifdef V8_MAP_PACKING
return Map::unchecked_cast(Object(Unpack(value_)));
#else
return Map::unchecked_cast(Object(value_));
#endif
}
bool MapWord::IsForwardingAddress() const { return HAS_SMI_TAG(value_); }
bool MapWord::IsForwardingAddress() const {
return (value_ & kForwardingTagMask) == kForwardingTag;
}
MapWord MapWord::FromForwardingAddress(HeapObject object) {
return MapWord(object.ptr() - kHeapObjectTag);
@ -745,7 +760,8 @@ void HeapObject::set_map_no_write_barrier(Map value) {
}
void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
set_map_word(MapWord::FromMap(value));
MapWord mapword = MapWord::FromMap(value);
set_map_word(mapword);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (mode != SKIP_WRITE_BARRIER) {
DCHECK(!value.is_null());
@ -761,19 +777,19 @@ ObjectSlot HeapObject::map_slot() const {
}
DEF_GETTER(HeapObject, map_word, MapWord) {
return MapField::Relaxed_Load(cage_base, *this);
return MapField::Relaxed_Load_Map_Word(cage_base, *this);
}
void HeapObject::set_map_word(MapWord map_word) {
MapField::Relaxed_Store(*this, map_word);
MapField::Relaxed_Store_Map_Word(*this, map_word);
}
DEF_GETTER(HeapObject, synchronized_map_word, MapWord) {
return MapField::Acquire_Load(cage_base, *this);
return MapField::Acquire_Load_No_Unpack(cage_base, *this);
}
void HeapObject::synchronized_set_map_word(MapWord map_word) {
MapField::Release_Store(*this, map_word);
MapField::Release_Store_Map_Word(*this, map_word);
}
bool HeapObject::release_compare_and_swap_map_word(MapWord old_map_word,

View File

@ -777,6 +777,23 @@ class MapWord {
inline Address ptr() { return value_; }
#ifdef V8_MAP_PACKING
static constexpr Address Pack(Address map) {
return map ^ Internals::kMapWordXorMask;
}
static constexpr Address Unpack(Address mapword) {
// TODO(wenyuzhao): Clear header metadata.
return mapword ^ Internals::kMapWordXorMask;
}
static constexpr bool IsPacked(Address mapword) {
return (static_cast<intptr_t>(mapword) & Internals::kMapWordXorMask) ==
Internals::kMapWordSignature &&
(0xffffffff00000000 & static_cast<intptr_t>(mapword)) != 0;
}
#else
static constexpr bool IsPacked(Address) { return false; }
#endif
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
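
Since Pack, Unpack and IsPacked carry most of the weight of this CL, a standalone sketch of the xor-based scheme may help. The mask, signature and metadata layout below are assumptions for illustration; the real values are the Internals::kMapWord* constants referenced elsewhere in this change.

// Sketch of xor-based map-word packing; constants are illustrative only.
#include <cassert>
#include <cstdint>

constexpr uint64_t kXorMask = 0b11;                  // assumed kMapWordXorMask
constexpr uint64_t kSignature = 0b10;                // assumed kMapWordSignature
constexpr uint64_t kMetadataMask = 0xffffull << 48;  // assumed metadata bits

constexpr uint64_t Pack(uint64_t map) { return map ^ kXorMask; }
constexpr uint64_t Unpack(uint64_t packed) {
  // Clear any metadata, then undo the xor.
  return (packed & ~kMetadataMask) ^ kXorMask;
}
constexpr bool IsPacked(uint64_t word) {
  // Packed words carry the signature in their low bits and a non-zero upper
  // half, so GC and barrier code can tell them apart from Smis and pointers.
  return (word & kXorMask) == kSignature && (word >> 32) != 0;
}

int main() {
  uint64_t map_ptr = 0x123456780000ull | 0b01;  // a tagged Map pointer
  uint64_t packed = Pack(map_ptr);
  assert(IsPacked(packed));
  assert(!IsPacked(map_ptr));
  assert(Unpack(packed) == map_ptr);
}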

View File

@ -10,6 +10,7 @@
#include "src/common/ptr-compr-inl.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/heap-object.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects.h"
#include "src/objects/slots.h"
@ -29,12 +30,32 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
return base::AsAtomicPointer::Relaxed_Load(location()) == raw_value;
}
bool FullObjectSlot::contains_map_value(Address raw_value) const {
return load_map().ptr() == raw_value;
}
Object FullObjectSlot::operator*() const { return Object(*location()); }
Object FullObjectSlot::load(PtrComprCageBase cage_base) const { return **this; }
void FullObjectSlot::store(Object value) const { *location() = value.ptr(); }
void FullObjectSlot::store_map(Map map) const {
#ifdef V8_MAP_PACKING
*location() = MapWord::Pack(map.ptr());
#else
store(map);
#endif
}
Map FullObjectSlot::load_map() const {
#ifdef V8_MAP_PACKING
return Map::unchecked_cast(Object(MapWord::Unpack(*location())));
#else
return Map::unchecked_cast(Object(*location()));
#endif
}
Object FullObjectSlot::Acquire_Load() const {
return Object(base::AsAtomicPointer::Acquire_Load(location()));
}
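
The difference between contains_value and the new contains_map_value above is that the former compares the raw slot contents while the latter compares the unpacked map pointer, which is why callers such as FindAllocationMemento and FreeSpace::IsValid switch to the map-aware variant. A minimal sketch, reusing the same assumed xor mask as before and none of the real slot classes:

// Sketch: raw comparison vs. map-aware comparison on a map slot.
#include <cassert>
#include <cstdint>

constexpr uint64_t kXorMask = 0b11;  // assumed packing xor mask

uint64_t Pack(uint64_t map) { return map ^ kXorMask; }
uint64_t Unpack(uint64_t word) { return word ^ kXorMask; }

struct FakeSlot {
  uint64_t word;  // what is physically stored in the header
  bool contains_value(uint64_t raw) const { return word == raw; }
  bool contains_map_value(uint64_t raw) const { return Unpack(word) == raw; }
};

int main() {
  uint64_t map_ptr = 0x123456780000ull | 0b01;
  FakeSlot slot{Pack(map_ptr)};            // store_map() writes the packed form
  assert(!slot.contains_value(map_ptr));   // raw comparison no longer matches
  assert(slot.contains_map_value(map_ptr));
}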

View File

@ -108,10 +108,14 @@ class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
// Compares memory representation of a value stored in the slot with given
// raw value.
inline bool contains_value(Address raw_value) const;
inline bool contains_map_value(Address raw_value) const;
inline Object operator*() const;
inline Object load(PtrComprCageBase cage_base) const;
inline void store(Object value) const;
inline void store_map(Map map) const;
inline Map load_map() const;
inline Object Acquire_Load() const;
inline Object Acquire_Load(PtrComprCageBase cage_base) const;

View File

@ -56,6 +56,7 @@ Tagged_t TaggedField<T, kFieldOffset>::full_to_tagged(Address value) {
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::load(HeapObject host, int offset) {
Tagged_t value = *location(host, offset);
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(host.ptr(), value));
}
@ -64,6 +65,7 @@ template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
Tagged_t value = *location(host, offset);
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(cage_base, value));
}
@ -73,7 +75,9 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, value);
#else
*location(host) = full_to_tagged(value.ptr());
Address ptr = value.ptr();
DCHECK_NE(kFieldOffset, HeapObject::kMapOffset);
*location(host) = full_to_tagged(ptr);
#endif
}
@ -83,7 +87,9 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, offset, value);
#else
*location(host, offset) = full_to_tagged(value.ptr());
Address ptr = value.ptr();
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
*location(host, offset) = full_to_tagged(ptr);
#endif
}
@ -91,6 +97,7 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(host.ptr(), value));
}
@ -99,50 +106,89 @@ template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Relaxed_Load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(cage_base, value));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, T value) {
T TaggedField<T, kFieldOffset>::Relaxed_Load_Map_Word(
PtrComprCageBase cage_base, HeapObject host) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, 0));
return T(tagged_to_full(cage_base, value));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Relaxed_Store_Map_Word(HeapObject host,
T value) {
AsAtomicTagged::Relaxed_Store(location(host), full_to_tagged(value.ptr()));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, T value) {
Address ptr = value.ptr();
DCHECK_NE(kFieldOffset, HeapObject::kMapOffset);
AsAtomicTagged::Relaxed_Store(location(host), full_to_tagged(ptr));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, int offset,
T value) {
AsAtomicTagged::Relaxed_Store(location(host, offset),
full_to_tagged(value.ptr()));
Address ptr = value.ptr();
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
AsAtomicTagged::Relaxed_Store(location(host, offset), full_to_tagged(ptr));
}
// static
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(host.ptr(), value));
}
// static
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Acquire_Load_No_Unpack(
PtrComprCageBase cage_base, HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
return T(tagged_to_full(cage_base, value));
}
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Acquire_Load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(cage_base, value));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, T value) {
AsAtomicTagged::Release_Store(location(host), full_to_tagged(value.ptr()));
Address ptr = value.ptr();
DCHECK_NE(kFieldOffset, HeapObject::kMapOffset);
AsAtomicTagged::Release_Store(location(host), full_to_tagged(ptr));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Release_Store_Map_Word(HeapObject host,
T value) {
Address ptr = value.ptr();
AsAtomicTagged::Release_Store(location(host), full_to_tagged(ptr));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, int offset,
T value) {
AsAtomicTagged::Release_Store(location(host, offset),
full_to_tagged(value.ptr()));
Address ptr = value.ptr();
DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
AsAtomicTagged::Release_Store(location(host, offset), full_to_tagged(ptr));
}
// static

View File

@ -52,6 +52,8 @@ class TaggedField : public AllStatic {
static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
HeapObject host, int offset = 0);
static inline T Acquire_Load(PtrComprCageBase cage_base, HeapObject host,
int offset = 0);
@ -61,6 +63,13 @@ class TaggedField : public AllStatic {
static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
T value);
// Note: Use these *_Map_Word methods only when loading a MapWord from a
// MapField.
static inline T Relaxed_Load_Map_Word(PtrComprCageBase cage_base,
HeapObject host);
static inline void Relaxed_Store_Map_Word(HeapObject host, T value);
static inline void Release_Store_Map_Word(HeapObject host, T value);
private:
static inline Tagged_t* location(HeapObject host, int offset = 0);
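
Regular TaggedField accessors now DCHECK that they are not applied at the map offset, so all map-word traffic funnels through the dedicated *_Map_Word helpers declared above. As a hedged illustration of the memory orderings they stand for (V8's AsAtomicTagged wrappers are not reproduced here; the mapping to std::memory_order is an assumption of this sketch):

// Sketch: a header word modelled as std::atomic, with methods named after the
// declarations above. Illustration only, not the V8 implementation.
#include <atomic>
#include <cstdint>

struct FakeHeader {
  std::atomic<uint64_t> map_word{0};

  uint64_t Relaxed_Load_Map_Word() const {
    return map_word.load(std::memory_order_relaxed);
  }
  void Relaxed_Store_Map_Word(uint64_t w) {
    map_word.store(w, std::memory_order_relaxed);
  }
  void Release_Store_Map_Word(uint64_t w) {  // e.g. when publishing a new map
    map_word.store(w, std::memory_order_release);
  }
  uint64_t Acquire_Load_No_Unpack() const {  // returns the packed word as-is
    return map_word.load(std::memory_order_acquire);
  }
};

int main() {
  FakeHeader h;
  h.Relaxed_Store_Map_Word(0x42);
  return h.Relaxed_Load_Map_Word() == 0x42 ? 0 : 1;
}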

View File

@ -162,6 +162,9 @@ class ObjectVisitor {
// Visits the relocation info using the given iterator.
virtual void VisitRelocInfo(RelocIterator* it);
// Visits the object's map pointer, decoding as necessary
virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
};
} // namespace internal

View File

@ -721,6 +721,13 @@ class IndexedReferencesExtractor : public ObjectVisitor {
ObjectSlot end) override {
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
void VisitMapPointer(HeapObject object) override {
if (generator_->visited_fields_[0]) {
generator_->visited_fields_[0] = false;
} else {
VisitHeapObjectImpl(object.map(), 0);
}
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
// [start,end) must be a sub-region of [parent_start_, parent_end), i.e.
@ -1500,6 +1507,7 @@ class RootsReferencesExtractor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
VisitRootPointer(root, description, p);
}
}
@ -1673,6 +1681,7 @@ void V8HeapExplorer::SetHiddenReference(HeapObject parent_obj,
HeapEntry* parent_entry, int index,
Object child_obj, int field_offset) {
DCHECK_EQ(parent_entry, GetEntry(parent_obj));
DCHECK(!MapWord::IsPacked(child_obj.ptr()));
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != nullptr && IsEssentialObject(child_obj) &&
IsEssentialHiddenReference(parent_obj, field_offset)) {
@ -1834,6 +1843,7 @@ class GlobalObjectsEnumerator : public RootVisitor {
void VisitRootPointersImpl(Root root, const char* description, TSlot start,
TSlot end) {
for (TSlot p = start; p < end; ++p) {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load(isolate_).ptr()));
Object o = p.load(isolate_);
if (!o.IsNativeContext(isolate_)) continue;
JSObject proxy = Context::cast(o).global_proxy();

View File

@ -75,7 +75,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
DisallowGarbageCollection no_gc;
// Check if the area is iterable by confirming that it starts with a map.
DCHECK((*ObjectSlot(soon_object)).IsMap());
DCHECK(HeapObject::FromAddress(soon_object).map().IsMap());
HandleScope scope(isolate_);
HeapObject heap_object = HeapObject::FromAddress(soon_object);

View File

@ -17,6 +17,10 @@ const char* RootsTable::root_names_[RootsTable::kEntriesCount] = {
#undef ROOT_NAME
};
MapWord ReadOnlyRoots::one_pointer_filler_map_word() {
return MapWord::FromMap(one_pointer_filler_map());
}
void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
visitor->VisitRootPointers(Root::kReadOnlyRootList, nullptr,
FullObjectSlot(read_only_roots_),

View File

@ -536,6 +536,10 @@ class ReadOnlyRoots {
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
V8_INLINE explicit ReadOnlyRoots(LocalIsolate* isolate);
// For `v8_enable_map_packing=true`, this will return a packed (also untagged)
// map-word instead of a tagged heap pointer.
MapWord one_pointer_filler_map_word();
#define ROOT_ACCESSOR(Type, name, CamelName) \
V8_INLINE class Type name() const; \
V8_INLINE class Type unchecked_##name() const; \

View File

@ -96,6 +96,28 @@ MaybeHandle<Object> Runtime::HasProperty(Isolate* isolate,
namespace {
// This function sets the sentinel value in a deleted field. The sentinel has
// to look like a proper standalone object because the slack tracking may
// complete at any time. For this reason we use the filler map word.
// If V8_MAP_PACKING is enabled, then the filler map word is a packed filler
// map. Otherwise, the filler map word is the same as the filler map.
inline void ClearField(Isolate* isolate, JSObject object, FieldIndex index) {
if (index.is_inobject()) {
MapWord filler_map_word =
ReadOnlyRoots(isolate).one_pointer_filler_map_word();
#ifndef V8_MAP_PACKING
DCHECK_EQ(filler_map_word.ToMap(),
ReadOnlyRoots(isolate).one_pointer_filler_map());
#endif
int offset = index.offset();
TaggedField<MapWord>::Release_Store(object, offset, filler_map_word);
} else {
object.property_array().set(
index.outobject_array_index(),
ReadOnlyRoots(isolate).one_pointer_filler_map());
}
}
bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Object> raw_key) {
// This implements a special case for fast property deletion: when the
@ -165,8 +187,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Clear out the properties backing store.
receiver->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
Object filler = ReadOnlyRoots(isolate).one_pointer_filler_map();
JSObject::cast(*receiver).FastPropertyAtPut(index, filler);
ClearField(isolate, JSObject::cast(*receiver), index);
// We must clear any recorded slot for the deleted property, because
// subsequent object modifications might put a raw double there.
// Slot clearing is the reason why this entire function cannot currently

View File

@ -285,6 +285,11 @@ void GenerateFieldValueAccessor(const Field& field,
cc_contents << " d::MemoryAccessResult validity = accessor("
<< address_getter << "()" << index_offset
<< ", reinterpret_cast<uint8_t*>(&value), sizeof(value));\n";
#ifdef V8_MAP_PACKING
if (field_getter == "GetMapValue") {
cc_contents << " value = i::MapWord::Unpack(value);\n";
}
#endif
cc_contents << " return {validity, "
<< (debug_field_type.IsTagged()
? "EnsureDecompressed(value, address_)"

View File

@ -4864,7 +4864,7 @@ namespace {
void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
std::string offset, const Field& leaf_field,
std::string indexed_field_size,
std::ostream& cc_contents) {
std::ostream& cc_contents, bool is_map) {
const Type* field_type = leaf_field.name_and_type.type;
bool maybe_object =
@ -4879,8 +4879,12 @@ void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
const std::string value = leaf_field.name_and_type.name + "__value";
// Read the field.
cc_contents << " " << object_type << " " << value << " = TaggedField<"
<< object_type << ">::load(o, " << offset << ");\n";
if (is_map) {
cc_contents << " " << object_type << " " << value << " = o.map();\n";
} else {
cc_contents << " " << object_type << " " << value << " = TaggedField<"
<< object_type << ">::load(o, " << offset << ");\n";
}
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
@ -4947,13 +4951,13 @@ void GenerateClassFieldVerifier(const std::string& class_name,
class_name, f.index.has_value(),
field_start_offset + " + " + std::to_string(*struct_field.offset),
struct_field, std::to_string((*struct_type)->PackedSize()),
cc_contents);
cc_contents, f.name_and_type.name == "map");
}
}
} else {
GenerateFieldValueVerifier(class_name, f.index.has_value(),
field_start_offset, f, "kTaggedSize",
cc_contents);
cc_contents, f.name_and_type.name == "map");
}
cc_contents << " }\n";

View File

@ -229,10 +229,13 @@ TEST(GetObjectProperties) {
d::ObjectPropertiesResultPtr props2;
{
heap_addresses.read_only_space_first_page = 0;
uintptr_t map_ptr = props->properties[0]->address;
uintptr_t map_map_ptr = *reinterpret_cast<i::Tagged_t*>(map_ptr);
#if V8_MAP_PACKING
map_map_ptr = reinterpret_cast<i::MapWord*>(&map_map_ptr)->ToMap().ptr();
#endif
uintptr_t map_address =
d::GetObjectProperties(
*reinterpret_cast<i::Tagged_t*>(props->properties[0]->address),
&ReadMemory, heap_addresses)
d::GetObjectProperties(map_map_ptr, &ReadMemory, heap_addresses)
->properties[0]
->address;
MemoryFailureRegion failure(map_address, map_address + i::Map::kSize);
@ -338,8 +341,11 @@ TEST(GetObjectProperties) {
// Verify the result for a heap object field which is itself a struct: the
// "descriptors" field on a DescriptorArray.
// Start by getting the object's map and the map's descriptor array.
props = d::GetObjectProperties(ReadProp<i::Tagged_t>(*props, "map"),
&ReadMemory, heap_addresses);
uintptr_t map_ptr = ReadProp<i::Tagged_t>(*props, "map");
#if V8_MAP_PACKING
map_ptr = reinterpret_cast<i::MapWord*>(&map_ptr)->ToMap().ptr();
#endif
props = d::GetObjectProperties(map_ptr, &ReadMemory, heap_addresses);
props = d::GetObjectProperties(
ReadProp<i::Tagged_t>(*props, "instance_descriptors"), &ReadMemory,
heap_addresses);

View File

@ -101,8 +101,9 @@ bool IsObjectShrinkable(JSObject obj) {
int unused = obj.map().UnusedPropertyFields();
if (unused == 0) return false;
Address packed_filler = MapWord::FromMap(*filler_map).ptr();
for (int i = inobject_properties - unused; i < inobject_properties; i++) {
if (*filler_map != GetFieldValue(obj, i)) {
if (packed_filler != GetFieldValue(obj, i).ptr()) {
return false;
}
}

View File

@ -316,7 +316,10 @@ class ReadStringVisitor : public TqObjectVisitor {
bool IsExternalStringCached(const TqExternalString* object) {
// The safest way to get the instance type is to use known map pointers, in
// case the map data is not available.
uintptr_t map = GetOrFinish(object->GetMapValue(accessor_));
Value<uintptr_t> map_ptr = object->GetMapValue(accessor_);
DCHECK_IMPLIES(map_ptr.validity == d::MemoryAccessResult::kOk,
!v8::internal::MapWord::IsPacked(map_ptr.value));
uintptr_t map = GetOrFinish(map_ptr);
if (done_) return false;
auto instance_types = FindKnownMapInstanceTypes(map, heap_addresses_);
// Exactly one of the matched instance types should be a string type,
@ -500,6 +503,7 @@ class AddInfoVisitor : public TqObjectVisitor {
if (map_ptr.validity != d::MemoryAccessResult::kOk) {
return; // Can't read the JSObject. Nothing useful to do.
}
DCHECK(!v8::internal::MapWord::IsPacked(map_ptr.value));
TqMap map(map_ptr.value);
// On JSObject instances, this value is the start of in-object properties.