Torquefy a few types

AllocationMemento, CoverageInfo, DebugInfo, DescriptorArray, FeedbackCell, FeedbackVector

Bug: v8:8952
Change-Id: I17297706a8d9bd4a0ee01b0b133ca613dbc31cf9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1521910
Commit-Queue: Irina Yatsenko <irinayat@microsoft.com>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61026}
This commit is contained in:
Irina Yatsenko 2019-04-25 12:47:40 -07:00 committed by Commit Bot
parent 8dbfc148f6
commit 9a3d5dd23c
22 changed files with 114 additions and 93 deletions

View File

@ -887,7 +887,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ldr(
optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code

View File

@ -1009,7 +1009,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code

View File

@ -165,6 +165,14 @@ extern class SourcePositionTableWithFrameCache extends Struct {
stack_frame_cache: Object;
}
extern class DescriptorArray extends HeapObject {
number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
raw_number_of_marked_descriptors: uint16;
filler16_bits: uint16;
enum_cache: EnumCache;
}
// These intrinsics should never be called from Torque code. They're used
// internally by the 'new' operator and only declared here because it's simpler
// than building the definition from C++.
@ -1051,6 +1059,27 @@ extern class DebugInfo extends Struct {
coverage_info: CoverageInfo | Undefined;
}
extern class FeedbackVector extends HeapObject {
shared_function_info: SharedFunctionInfo;
// TODO(v8:9108): currently no support for MaybeObject in Torque
optimized_code_weak_or_smi: Object;
closure_feedback_cell_array: FixedArray;
length: int32;
invocation_count: int32;
profiler_ticks: int32;
deopt_count: int32;
}
extern class FeedbackCell extends Struct {
value: Undefined | FeedbackVector | FixedArray;
interrupt_budget: int32;
}
type AllocationSite extends Struct;
extern class AllocationMemento extends Struct {
allocation_site: AllocationSite;
}
extern class WasmModuleObject extends JSObject {
native_module: Foreign;
export_wrappers: FixedArray;

View File

@ -44,7 +44,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
Label fallthrough(this);
TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
feedback_vector, FeedbackVector::kOptimizedCodeOffset);
feedback_vector, FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code

View File

@ -799,7 +799,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = scratch;
__ mov(optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code

View File

@ -874,7 +874,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry = scratch1;
__ lw(optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak cell to a code

View File

@ -891,7 +891,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry = scratch1;
__ Ld(optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code

View File

@ -906,7 +906,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadP(
optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code

View File

@ -960,7 +960,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadP(
optimized_code_entry,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code

View File

@ -899,7 +899,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset),
FieldOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
decompr_scratch);
// Check if the code entry is a Smi. If yes, we interpret it as an

View File

@ -102,7 +102,8 @@ Handle<FeedbackCell> ClosureFeedbackCellArray::GetFeedbackCell(int index) {
ACCESSORS(FeedbackVector, shared_function_info, SharedFunctionInfo,
kSharedFunctionInfoOffset)
WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi, kOptimizedCodeOffset)
WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi,
kOptimizedCodeWeakOrSmiOffset)
ACCESSORS(FeedbackVector, closure_feedback_cell_array, ClosureFeedbackCellArray,
kClosureFeedbackCellArrayOffset)
INT32_ACCESSORS(FeedbackVector, length, kLengthOffset)

View File

@ -311,21 +311,11 @@ class FeedbackVector : public HeapObject {
// garbage collection (e.g., for patching the cache).
static inline Symbol RawUninitializedSentinel(Isolate* isolate);
// Layout description.
#define FEEDBACK_VECTOR_FIELDS(V) \
/* Header fields. */ \
V(kSharedFunctionInfoOffset, kTaggedSize) \
V(kOptimizedCodeOffset, kTaggedSize) \
V(kClosureFeedbackCellArrayOffset, kTaggedSize) \
V(kLengthOffset, kInt32Size) \
V(kInvocationCountOffset, kInt32Size) \
V(kProfilerTicksOffset, kInt32Size) \
V(kDeoptCountOffset, kInt32Size) \
V(kUnalignedHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_VECTOR_FIELDS)
#undef FEEDBACK_VECTOR_FIELDS
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FEEDBACK_VECTOR_FIELDS)
static constexpr int kUnalignedHeaderSize = kSize;
static const int kHeaderSize =
RoundUp<kObjectAlignment>(int{kUnalignedHeaderSize});
static const int kFeedbackSlotsOffset = kHeaderSize;

View File

@ -1882,8 +1882,8 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(
FeedbackCell::kSize, AllocationType::kOld, *no_closures_cell_map());
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *no_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
@ -1893,8 +1893,8 @@ Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(
FeedbackCell::kSize, AllocationType::kOld, *one_closure_cell_map());
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *one_closure_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
@ -1904,8 +1904,8 @@ Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
HeapObject result = AllocateRawWithImmortalMap(
FeedbackCell::kSize, AllocationType::kOld, *many_closures_cell_map());
HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
AllocationType::kOld, *many_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());

View File

@ -452,11 +452,14 @@ bool Heap::CreateInitialMaps() {
// The "no closures" and "one closure" FeedbackCell maps need
// to be marked unstable because their objects can change maps.
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_closures_cell)
ALLOCATE_MAP(
FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, no_closures_cell)
roots.no_closures_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
ALLOCATE_MAP(
FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, one_closure_cell)
roots.one_closure_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
ALLOCATE_MAP(
FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, many_closures_cell)
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)

View File

@ -471,7 +471,7 @@ class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset == kSharedFunctionInfoOffset ||
offset == kOptimizedCodeOffset ||
offset == kOptimizedCodeWeakOrSmiOffset ||
offset == kClosureFeedbackCellArrayOffset ||
offset >= kFeedbackSlotsOffset;
}
@ -480,7 +480,7 @@ class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kSharedFunctionInfoOffset, v);
IterateMaybeWeakPointer(obj, kOptimizedCodeOffset, v);
IterateMaybeWeakPointer(obj, kOptimizedCodeWeakOrSmiOffset, v);
IteratePointer(obj, kClosureFeedbackCellArrayOffset, v);
IterateMaybeWeakPointers(obj, kFeedbackSlotsOffset, object_size, v);
}

View File

@ -2107,6 +2107,10 @@ void ObjectTemplateInfo::ObjectTemplateInfoVerify(Isolate* isolate) {
void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
CHECK(IsAllocationSite());
CHECK(dependent_code()->IsDependentCode());
CHECK(transition_info_or_boilerplate()->IsSmi() ||
transition_info_or_boilerplate()->IsJSObject());
CHECK(nested_site()->IsAllocationSite() || nested_site() == Smi::kZero);
}
void AllocationMemento::AllocationMementoVerify(Isolate* isolate) {
@ -2153,10 +2157,15 @@ void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
void DebugInfo::DebugInfoVerify(Isolate* isolate) {
CHECK(IsDebugInfo());
VerifyPointer(isolate, shared());
VerifyPointer(isolate, script());
VerifyPointer(isolate, original_bytecode_array());
VerifyPointer(isolate, break_points());
VerifySmiField(kFlagsOffset);
VerifySmiField(kDebuggerHintsOffset);
CHECK(shared()->IsSharedFunctionInfo());
CHECK(script()->IsUndefined(isolate) || script()->IsScript());
CHECK(original_bytecode_array()->IsUndefined(isolate) ||
original_bytecode_array()->IsBytecodeArray());
CHECK(debug_bytecode_array()->IsUndefined(isolate) ||
debug_bytecode_array()->IsBytecodeArray());
CHECK(break_points()->IsFixedArray());
}
void StackTraceFrame::StackTraceFrameVerify(Isolate* isolate) {

View File

@ -134,25 +134,25 @@ class AllocationSite : public Struct {
static bool ShouldTrack(ElementsKind from, ElementsKind to);
static inline bool CanTrack(InstanceType type);
// Layout description.
// AllocationSite has to start with TransitionInfoOrboilerPlateOffset
// and end with WeakNext field.
#define ALLOCATION_SITE_FIELDS(V) \
V(kStartOffset, 0) \
V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \
V(kNestedSiteOffset, kTaggedSize) \
V(kDependentCodeOffset, kTaggedSize) \
V(kCommonPointerFieldEndOffset, 0) \
V(kPretenureDataOffset, kInt32Size) \
V(kPretenureCreateCountOffset, kInt32Size) \
/* Size of AllocationSite without WeakNext field */ \
V(kSizeWithoutWeakNext, 0) \
V(kWeakNextOffset, kTaggedSize) \
/* Size of AllocationSite with WeakNext field */ \
V(kSizeWithWeakNext, 0)
// Layout description.
// AllocationSite has to start with TransitionInfoOrboilerPlateOffset
// and end with WeakNext field.
#define ALLOCATION_SITE_FIELDS(V) \
V(kStartOffset, 0) \
V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \
V(kNestedSiteOffset, kTaggedSize) \
V(kDependentCodeOffset, kTaggedSize) \
V(kCommonPointerFieldEndOffset, 0) \
V(kPretenureDataOffset, kInt32Size) \
V(kPretenureCreateCountOffset, kInt32Size) \
/* Size of AllocationSite without WeakNext field */ \
V(kSizeWithoutWeakNext, 0) \
V(kWeakNextOffset, kTaggedSize) \
/* Size of AllocationSite with WeakNext field */ \
V(kSizeWithWeakNext, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
#undef ALLOCATION_SITE_FIELDS
#undef ALLOCATION_SITE_FIELDS
class BodyDescriptor;
@ -164,14 +164,9 @@ class AllocationSite : public Struct {
class AllocationMemento : public Struct {
public:
// Layout description.
#define ALLOCATION_MEMENTO_FIELDS(V) \
V(kAllocationSiteOffset, kTaggedSize) \
V(kSize, 0)
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
ALLOCATION_MEMENTO_FIELDS)
#undef ALLOCATION_MEMENTO_FIELDS
TORQUE_GENERATED_ALLOCATION_MEMENTO_FIELDS)
DECL_ACCESSORS(allocation_site, Object)

View File

@ -92,7 +92,7 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) {
}
ObjectSlot DescriptorArray::GetFirstPointerSlot() {
return RawField(DescriptorArray::kPointersStartOffset);
return RawField(DescriptorArray::kStartOfPointerFieldsOffset);
}
ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {

View File

@ -139,20 +139,11 @@ class DescriptorArray : public HeapObject {
static const int kNotFound = -1;
// Layout description.
#define DESCRIPTOR_ARRAY_FIELDS(V) \
V(kNumberOfAllDescriptorsOffset, kUInt16Size) \
V(kNumberOfDescriptorsOffset, kUInt16Size) \
V(kRawNumberOfMarkedDescriptorsOffset, kUInt16Size) \
V(kFiller16BitsOffset, kUInt16Size) \
V(kPointersStartOffset, 0) \
V(kEnumCacheOffset, kTaggedSize) \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
DESCRIPTOR_ARRAY_FIELDS)
#undef DESCRIPTOR_ARRAY_FIELDS
TORQUE_GENERATED_DESCRIPTOR_ARRAY_FIELDS)
static constexpr int kHeaderSize = kSize;
STATIC_ASSERT(IsAligned(kPointersStartOffset, kTaggedSize));
STATIC_ASSERT(IsAligned(kStartOfPointerFieldsOffset, kTaggedSize));
STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
// Garbage collection support.
@ -174,7 +165,8 @@ class DescriptorArray : public HeapObject {
inline ObjectSlot GetKeySlot(int descriptor);
inline MaybeObjectSlot GetValueSlot(int descriptor);
using BodyDescriptor = FlexibleWeakBodyDescriptor<kPointersStartOffset>;
using BodyDescriptor =
FlexibleWeakBodyDescriptor<kStartOfPointerFieldsOffset>;
// Layout of descriptor.
// Naming is consistent with Dictionary classes for easy templating.

View File

@ -25,10 +25,10 @@ ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset)
void FeedbackCell::clear_padding() {
if (FeedbackCell::kSize == FeedbackCell::kUnalignedSize) return;
DCHECK_GE(FeedbackCell::kSize, FeedbackCell::kUnalignedSize);
if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return;
DCHECK_GE(FeedbackCell::kAlignedSize, FeedbackCell::kUnalignedSize);
memset(reinterpret_cast<byte*>(address() + FeedbackCell::kUnalignedSize), 0,
FeedbackCell::kSize - FeedbackCell::kUnalignedSize);
FeedbackCell::kAlignedSize - FeedbackCell::kUnalignedSize);
}
} // namespace internal

View File

@ -37,23 +37,17 @@ class FeedbackCell : public Struct {
DECL_PRINTER(FeedbackCell)
DECL_VERIFIER(FeedbackCell)
// Layout description.
#define FEEDBACK_CELL_FIELDS(V) \
V(kValueOffset, kTaggedSize) \
/* Non-pointer fields */ \
V(kInterruptBudgetOffset, kInt32Size) \
/* Total size. */ \
V(kUnalignedSize, 0)
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FEEDBACK_CELL_FIELDS)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_CELL_FIELDS)
#undef FEEDBACK_CELL_FIELDS
static const int kSize = RoundUp<kObjectAlignment>(int{kUnalignedSize});
static const int kUnalignedSize = kSize;
static const int kAlignedSize = RoundUp<kObjectAlignment>(int{kSize});
inline void clear_padding();
using BodyDescriptor =
FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kSize>;
FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
OBJECT_CONSTRUCTORS(FeedbackCell, Struct);
};

View File

@ -1232,7 +1232,7 @@ void V8HeapExplorer::ExtractFeedbackVectorReferences(
HeapObject code_heap_object;
if (code->GetHeapObjectIfWeak(&code_heap_object)) {
SetWeakReference(entry, "optimized code", code_heap_object,
FeedbackVector::kOptimizedCodeOffset);
FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
}
}