(Reland) Torquefy a few more types

WeakFixedArray, WeakArrayList, JSFinalizationGroup, JSFinalizationGroupCleanupIterator, WeakCell, JSWeakRef, BytecodeArray, SourcePositionTableWithFrameCache

Note: SourcePositionTableWithFrameCache doesn't derive from Tuple2 anymore.
Bug: v8:8952

Original CL: https://chromium-review.googlesource.com/c/v8/v8/+/1504433

Change-Id: I13f102b445c9ff3e1ebabe0cdf013c62bb6d771d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1559212
Commit-Queue: Irina Yatsenko <irinayat@microsoft.com>
Reviewed-by: Simon Zünd <szuend@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61932}
Authored by Irina Yatsenko on 2019-05-30 12:55:07 -07:00, committed by Commit Bot
parent d0cfb9d175
commit 73ad21b139
18 changed files with 117 additions and 115 deletions


@@ -1093,11 +1093,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ mov(r9, Operand(0));
   __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                              BytecodeArray::kOSRNestingLevelOffset));
+                              BytecodeArray::kOsrNestingLevelOffset));
   // Load the initial bytecode offset.
   __ mov(kInterpreterBytecodeOffsetRegister,
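Context for the STATIC_ASSERTs that recur in every architecture's trampoline below: the trampoline resets the OSR nesting level and the bytecode age with a single 16-bit store, so the asserts pin the two 8-bit fields next to each other. A minimal standalone sketch of that layout trick follows (plain C++, not V8 code; the struct and field names are illustrative stand-ins for the real BytecodeArray header fields).

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct BytecodeHeaderTail {   // hypothetical stand-in for the end of the header
  int8_t osr_nesting_level;   // corresponds to kOsrNestingLevelOffset
  int8_t bytecode_age;        // corresponds to kBytecodeAgeOffset
};

// Mirrors the STATIC_ASSERT: the age byte must directly follow the OSR byte.
static_assert(offsetof(BytecodeHeaderTail, bytecode_age) ==
                  offsetof(BytecodeHeaderTail, osr_nesting_level) + sizeof(int8_t),
              "bytecode_age must directly follow osr_nesting_level");

int main() {
  BytecodeHeaderTail tail{3, 7};
  const uint16_t zero = 0;
  // One 16-bit write clears both fields, mirroring the __ strh / __ mov_w /
  // __ movw stores in the trampolines.
  std::memcpy(&tail.osr_nesting_level, &zero, sizeof(zero));
  assert(tail.osr_nesting_level == 0 && tail.bytecode_age == 0);
}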


@@ -1201,10 +1201,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                               BytecodeArray::kOSRNestingLevelOffset));
+                               BytecodeArray::kOsrNestingLevelOffset));
   // Load the initial bytecode offset.
   __ Mov(kInterpreterBytecodeOffsetRegister,


@@ -14,6 +14,7 @@
 #include 'src/objects/js-generator.h'
 #include 'src/objects/js-promise.h'
 #include 'src/objects/js-regexp-string-iterator.h'
+#include 'src/objects/js-weak-refs.h'
 #include 'src/objects/module.h'
 #include 'src/objects/objects.h'
 #include 'src/objects/stack-frame-info.h'
@@ -37,6 +38,7 @@ extern class HeapObject extends Tagged {
 }
 type Object = Smi | HeapObject;
 type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
 type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
 type int31 extends int32
@@ -218,8 +220,6 @@ extern class Map extends HeapObject {
                                           PrototypeInfo | Smi;
 }
-type BytecodeArray extends FixedArrayBase;
 @generatePrint
 extern class EnumCache extends Struct {
   keys: FixedArray;
@@ -449,7 +449,6 @@ extern class CallHandlerInfo extends Struct {
 }
 type JSModuleNamespace extends JSObject;
-type WeakArrayList extends HeapObject;
 @abstract
 @noVerifier
@@ -481,6 +480,13 @@ extern class JSMessageObject extends JSObject {
   error_level: Smi;
 }
+extern class WeakArrayList extends HeapObject {
+  capacity: Smi;
+  length: Smi;
+  // TODO(v8:8983): declare variable-sized region for contained MaybeObject's
+  // objects[length]: MaybeObject;
+}
 extern class PrototypeInfo extends Struct {
   js_module_namespace: JSModuleNamespace | Undefined;
   prototype_users: WeakArrayList | Zero;
@@ -1339,6 +1345,46 @@ extern class AsmWasmData extends Struct {
   uses_bitset: HeapNumber;
 }
+extern class JSFinalizationGroup extends JSObject {
+  native_context: NativeContext;
+  cleanup: Object;
+  active_cells: Undefined | WeakCell;
+  cleared_cells: Undefined | WeakCell;
+  key_map: Object;
+  next: Undefined | JSFinalizationGroup;
+  flags: Smi;
+}
+extern class JSFinalizationGroupCleanupIterator extends JSObject {
+  finalization_group: JSFinalizationGroup;
+}
+extern class WeakCell extends HeapObject {
+  finalization_group: Undefined | JSFinalizationGroup;
+  target: Undefined | JSReceiver;
+  holdings: Object;
+  prev: Undefined | WeakCell;
+  next: Undefined | WeakCell;
+  key: Object;
+  key_list_prev: Undefined | WeakCell;
+  key_list_next: Undefined | WeakCell;
+}
+extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; }
+extern class BytecodeArray extends FixedArrayBase {
+  // TODO(v8:8983): bytecode array object sizes vary based on their contents.
+  constant_pool: FixedArray;
+  handler_table: ByteArray;
+  source_position_table: Undefined | ByteArray |
+                         SourcePositionTableWithFrameCache;
+  frame_size: int32;
+  parameter_size: int32;
+  incoming_new_target_or_generator_register: int32;
+  osr_nesting_level: int8;
+  bytecode_age: int8;
+}
 extern macro Is64(): constexpr bool;
 extern macro SelectBooleanConstant(bool): Boolean;
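To connect these new Torque class definitions with the C++ changes further down: Torque emits a field list macro per class (the TORQUE_GENERATED_*_FIELDS names visible in the header hunks below), and DEFINE_FIELD_OFFSET_CONSTANTS expands that list into offset constants, replacing the hand-written #define lists this commit deletes. Below is a minimal, self-contained sketch of that expansion pattern; the macro internals, the 8-byte tagged size, and the single-slot header are illustrative assumptions, not the literal generated code.

// Simplified stand-ins, assuming a 64-bit build without pointer compression.
constexpr int kTaggedSize = 8;
constexpr int kHeapObjectHeaderSize = kTaggedSize;  // illustrative: just the map slot

// What Torque might emit for "capacity: Smi; length: Smi;" on WeakArrayList.
#define TORQUE_GENERATED_WEAK_ARRAY_LIST_FIELDS(V) \
  V(kCapacityOffset, kTaggedSize)                  \
  V(kLengthOffset, kTaggedSize)                    \
  V(kSize, 0)

// Simplified stand-in for V8's DEFINE_FIELD_OFFSET_CONSTANTS X-macro.
#define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
#define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
  enum { LIST_MACRO##_BASE = (StartOffset)-1, LIST_MACRO(DEFINE_ONE_FIELD_OFFSET) };

struct WeakArrayListLayout {
  DEFINE_FIELD_OFFSET_CONSTANTS(kHeapObjectHeaderSize,
                                TORQUE_GENERATED_WEAK_ARRAY_LIST_FIELDS)
};

// Each field starts where the previous one ends; kSize covers the whole header.
static_assert(WeakArrayListLayout::kCapacityOffset == 8, "capacity follows the map");
static_assert(WeakArrayListLayout::kLengthOffset == 16, "length follows capacity");
static_assert(WeakArrayListLayout::kSize == 24, "kSize is the end of the last field");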


@@ -1023,10 +1023,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
-                        BytecodeArray::kOSRNestingLevelOffset),
+                        BytecodeArray::kOsrNestingLevelOffset),
            Immediate(0));
   // Push bytecode array.


@@ -1109,12 +1109,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ li(r8, Operand(0));
   __ StoreHalfWord(r8,
                    FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                   BytecodeArray::kOSRNestingLevelOffset),
+                                   BytecodeArray::kOsrNestingLevelOffset),
                    r0);
   // Load initial bytecode offset.


@@ -1161,12 +1161,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ lghi(r1, Operand(0));
   __ StoreHalfWord(r1,
                    FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                   BytecodeArray::kOSRNestingLevelOffset),
+                                   BytecodeArray::kOsrNestingLevelOffset),
                    r0);
   // Load the initial bytecode offset.


@@ -1109,10 +1109,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
-                       BytecodeArray::kOSRNestingLevelOffset),
+                       BytecodeArray::kOsrNestingLevelOffset),
           Immediate(0));
   // Load initial bytecode offset.


@@ -502,6 +502,10 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
   CHECK(IsBytecodeArray());
   CHECK(constant_pool().IsFixedArray());
   VerifyHeapPointer(isolate, constant_pool());
+  CHECK(source_position_table().IsUndefined() ||
+        source_position_table().IsByteArray() ||
+        source_position_table().IsSourcePositionTableWithFrameCache());
+  CHECK(handler_table().IsByteArray());
 }
 USE_TORQUE_VERIFIER(FreeSpace)
@@ -709,6 +713,8 @@ void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
 }
 void WeakArrayList::WeakArrayListVerify(Isolate* isolate) {
+  VerifySmiField(kCapacityOffset);
+  VerifySmiField(kLengthOffset);
   for (int i = 0; i < length(); i++) {
     MaybeObject::VerifyMaybeObjectPointer(isolate, Get(i));
   }
@@ -1288,6 +1294,7 @@ void JSFinalizationGroup::JSFinalizationGroupVerify(Isolate* isolate) {
   if (cleared_cells().IsWeakCell()) {
     CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
   }
+  CHECK(next().IsUndefined(isolate) || next().IsJSFinalizationGroup());
 }
 void JSFinalizationGroupCleanupIterator::


@@ -1501,9 +1501,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
   UpdateInterruptBudget(profiling_weight, true);
 }
-Node* InterpreterAssembler::LoadOSRNestingLevel() {
+Node* InterpreterAssembler::LoadOsrNestingLevel() {
   return LoadObjectField(BytecodeArrayTaggedPointer(),
-                         BytecodeArray::kOSRNestingLevelOffset,
+                         BytecodeArray::kOsrNestingLevelOffset,
                          MachineType::Int8());
 }


@@ -237,7 +237,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   void UpdateInterruptBudgetOnReturn();
   // Returns the OSR nesting level from the bytecode header.
-  compiler::Node* LoadOSRNestingLevel();
+  compiler::Node* LoadOsrNestingLevel();
   // Dispatch to the bytecode.
   compiler::Node* Dispatch();


@@ -2336,7 +2336,7 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
 IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
   Node* relative_jump = BytecodeOperandUImmWord(0);
   Node* loop_depth = BytecodeOperandImm(1);
-  Node* osr_level = LoadOSRNestingLevel();
+  Node* osr_level = LoadOsrNestingLevel();
   // Check if OSR points at the given {loop_depth} are armed by comparing it to
   // the current {osr_level} loaded from the header of the BytecodeArray.


@@ -622,32 +622,32 @@ void BytecodeArray::set(int index, byte value) {
   WriteField<byte>(kHeaderSize + index * kCharSize, value);
 }
-void BytecodeArray::set_frame_size(int frame_size) {
+void BytecodeArray::set_frame_size(int32_t frame_size) {
   DCHECK_GE(frame_size, 0);
   DCHECK(IsAligned(frame_size, kSystemPointerSize));
-  WriteField<int>(kFrameSizeOffset, frame_size);
+  WriteField<int32_t>(kFrameSizeOffset, frame_size);
 }
-int BytecodeArray::frame_size() const {
-  return ReadField<int>(kFrameSizeOffset);
+int32_t BytecodeArray::frame_size() const {
+  return ReadField<int32_t>(kFrameSizeOffset);
 }
 int BytecodeArray::register_count() const {
-  return frame_size() / kSystemPointerSize;
+  return static_cast<int>(frame_size()) / kSystemPointerSize;
 }
-void BytecodeArray::set_parameter_count(int number_of_parameters) {
+void BytecodeArray::set_parameter_count(int32_t number_of_parameters) {
   DCHECK_GE(number_of_parameters, 0);
   // Parameter count is stored as the size on stack of the parameters to allow
   // it to be used directly by generated code.
-  WriteField<int>(kParameterSizeOffset,
+  WriteField<int32_t>(kParameterSizeOffset,
                   (number_of_parameters << kSystemPointerSizeLog2));
 }
 interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
     const {
-  int register_operand =
-      ReadField<int>(kIncomingNewTargetOrGeneratorRegisterOffset);
+  int32_t register_operand =
+      ReadField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset);
   if (register_operand == 0) {
     return interpreter::Register::invalid_value();
   } else {
@@ -658,24 +658,24 @@ interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
 void BytecodeArray::set_incoming_new_target_or_generator_register(
     interpreter::Register incoming_new_target_or_generator_register) {
   if (!incoming_new_target_or_generator_register.is_valid()) {
-    WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
+    WriteField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
   } else {
     DCHECK(incoming_new_target_or_generator_register.index() <
            register_count());
     DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
-    WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset,
+    WriteField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset,
                     incoming_new_target_or_generator_register.ToOperand());
   }
 }
 int BytecodeArray::osr_loop_nesting_level() const {
-  return ReadField<int8_t>(kOSRNestingLevelOffset);
+  return ReadField<int8_t>(kOsrNestingLevelOffset);
 }
 void BytecodeArray::set_osr_loop_nesting_level(int depth) {
   DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
   STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
-  WriteField<int8_t>(kOSRNestingLevelOffset, depth);
+  WriteField<int8_t>(kOsrNestingLevelOffset, depth);
 }
 BytecodeArray::Age BytecodeArray::bytecode_age() const {
@@ -691,10 +691,10 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
   RELAXED_WRITE_INT8_FIELD(*this, kBytecodeAgeOffset, static_cast<int8_t>(age));
 }
-int BytecodeArray::parameter_count() const {
+int32_t BytecodeArray::parameter_count() const {
   // Parameter count is stored as the size on stack of the parameters to allow
   // it to be used directly by generated code.
-  return ReadField<int>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
+  return ReadField<int32_t>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
 }
 ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
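As the comments in the accessors above say, the parameter count is stored pre-multiplied by the pointer size (the size the parameters occupy on the stack) so generated code can use it as a byte offset directly. A small standalone illustration of that encoding, independent of V8 (the function names and the 64-bit pointer-size assumption are mine):

#include <cassert>
#include <cstdint>

constexpr int32_t kSystemPointerSizeLog2 = 3;  // assumption: 64-bit target

// What set_parameter_count() effectively writes into kParameterSizeOffset.
int32_t EncodeParameterSize(int32_t number_of_parameters) {
  return number_of_parameters << kSystemPointerSizeLog2;
}

// What parameter_count() returns after reading the stored value back.
int32_t DecodeParameterCount(int32_t stored_parameter_size) {
  return stored_parameter_size >> kSystemPointerSizeLog2;
}

int main() {
  const int32_t stored = EncodeParameterSize(2);  // 2 parameters -> 16 bytes on stack
  assert(stored == 16);
  assert(DecodeParameterCount(stored) == 2);
}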


@@ -830,7 +830,8 @@ void BytecodeArray::Disassemble(std::ostream& os) {
     os << reinterpret_cast<const void*>(current_address) << " @ "
        << std::setw(4) << iterator.current_offset() << " : ";
     interpreter::BytecodeDecoder::Decode(
-        os, reinterpret_cast<byte*>(current_address), parameter_count());
+        os, reinterpret_cast<byte*>(current_address),
+        static_cast<int>(parameter_count()));
     if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
       Address jump_target = base_address + iterator.GetJumpTargetOffset();
       os << " (" << reinterpret_cast<void*>(jump_target) << " @ "


@@ -741,15 +741,15 @@ class BytecodeArray : public FixedArrayBase {
   inline Address GetFirstBytecodeAddress();
   // Accessors for frame size.
-  inline int frame_size() const;
-  inline void set_frame_size(int frame_size);
+  inline int32_t frame_size() const;
+  inline void set_frame_size(int32_t frame_size);
   // Accessor for register count (derived from frame_size).
   inline int register_count() const;
   // Accessors for parameter count (including implicit 'this' receiver).
-  inline int parameter_count() const;
-  inline void set_parameter_count(int number_of_parameters);
+  inline int32_t parameter_count() const;
+  inline void set_parameter_count(int32_t number_of_parameters);
   // Register used to pass the incoming new.target or generator object from the
   // fucntion call.
@@ -828,28 +828,15 @@ class BytecodeArray : public FixedArrayBase {
   // Compares only the bytecode array but not any of the header fields.
   bool IsBytecodeEqual(const BytecodeArray other) const;
   // Layout description.
-#define BYTECODE_ARRAY_FIELDS(V)                           \
-  /* Pointer fields. */                                    \
-  V(kConstantPoolOffset, kTaggedSize)                      \
-  V(kHandlerTableOffset, kTaggedSize)                      \
-  V(kSourcePositionTableOffset, kTaggedSize)               \
-  V(kFrameSizeOffset, kIntSize)                            \
-  V(kParameterSizeOffset, kIntSize)                        \
-  V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
-  V(kOSRNestingLevelOffset, kCharSize)                     \
-  V(kBytecodeAgeOffset, kCharSize)                         \
-  /* Total size. */                                        \
-  V(kHeaderSize, 0)
   DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
-                                BYTECODE_ARRAY_FIELDS)
-#undef BYTECODE_ARRAY_FIELDS
+                                TORQUE_GENERATED_BYTECODE_ARRAY_FIELDS)
+  static constexpr int kHeaderSize = kSize;
   // InterpreterEntryTrampoline expects these fields to be next to each other
   // and writes a 16-bit value to reset them.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                kOSRNestingLevelOffset + kCharSize);
+                kOsrNestingLevelOffset + kCharSize);
   // Maximal memory consumption for a single BytecodeArray.
   static const int kMaxSize = 512 * MB;


@@ -364,14 +364,9 @@ class WeakArrayList : public HeapObject {
   // Layout description.
-#define WEAK_ARRAY_LIST_FIELDS(V) \
-  V(kCapacityOffset, kTaggedSize) \
-  V(kLengthOffset, kTaggedSize)   \
-  /* Header size. */              \
-  V(kHeaderSize, 0)
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_ARRAY_LIST_FIELDS)
-#undef WEAK_ARRAY_LIST_FIELDS
+  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+                                TORQUE_GENERATED_WEAK_ARRAY_LIST_FIELDS)
+  static constexpr int kHeaderSize = kSize;
   using BodyDescriptor = WeakArrayBodyDescriptor;


@@ -60,21 +60,9 @@ class JSFinalizationGroup : public JSObject {
   static void Cleanup(Handle<JSFinalizationGroup> finalization_group,
                       Isolate* isolate);
   // Layout description.
-#define JS_FINALIZATION_GROUP_FIELDS(V) \
-  V(kNativeContextOffset, kTaggedSize)  \
-  V(kCleanupOffset, kTaggedSize)        \
-  V(kActiveCellsOffset, kTaggedSize)    \
-  V(kClearedCellsOffset, kTaggedSize)   \
-  V(kKeyMapOffset, kTaggedSize)         \
-  V(kNextOffset, kTaggedSize)           \
-  V(kFlagsOffset, kTaggedSize)          \
-  /* Header size. */                    \
-  V(kSize, 0)
   DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                JS_FINALIZATION_GROUP_FIELDS)
-#undef JS_FINALIZATION_GROUP_FIELDS
+                                TORQUE_GENERATED_JSFINALIZATION_GROUP_FIELDS)
   // Bitfields in flags.
   class ScheduledForCleanupField : public BitField<bool, 0, 1> {};
@@ -106,21 +94,9 @@ class WeakCell : public HeapObject {
   DECL_ACCESSORS(key_list_prev, Object)
   DECL_ACCESSORS(key_list_next, Object)
   // Layout description.
-#define WEAK_CELL_FIELDS(V)                \
-  V(kFinalizationGroupOffset, kTaggedSize) \
-  V(kTargetOffset, kTaggedSize)            \
-  V(kHoldingsOffset, kTaggedSize)          \
-  V(kPrevOffset, kTaggedSize)              \
-  V(kNextOffset, kTaggedSize)              \
-  V(kKeyOffset, kTaggedSize)               \
-  V(kKeyListPrevOffset, kTaggedSize)       \
-  V(kKeyListNextOffset, kTaggedSize)       \
-  /* Header size. */                       \
-  V(kSize, 0)
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_CELL_FIELDS)
-#undef WEAK_CELL_FIELDS
+  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+                                TORQUE_GENERATED_WEAK_CELL_FIELDS)
   class BodyDescriptor;
@@ -146,14 +122,9 @@ class JSWeakRef : public JSObject {
   DECL_ACCESSORS(target, HeapObject)
   // Layout description.
-#define JS_WEAK_REF_FIELDS(V)   \
-  V(kTargetOffset, kTaggedSize) \
-  /* Header size. */            \
-  V(kSize, 0)
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_REF_FIELDS)
-#undef JS_WEAK_REF_FIELDS
+  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+                                TORQUE_GENERATED_JSWEAK_REF_FIELDS)
   class BodyDescriptor;
@@ -189,15 +160,10 @@ class JSFinalizationGroupCleanupIterator : public JSObject {
   DECL_ACCESSORS(finalization_group, JSFinalizationGroup)
   // Layout description.
-#define JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS(V) \
-  V(kFinalizationGroupOffset, kTaggedSize)               \
-  /* Header size. */                                     \
-  V(kSize, 0)
-  DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
-                                JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
-#undef JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS
+  DEFINE_FIELD_OFFSET_CONSTANTS(
+      JSObject::kHeaderSize,
+      TORQUE_GENERATED_JSFINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
   OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator, JSObject);
 };


@@ -604,8 +604,8 @@ TEST(BytecodeArray) {
   if (FLAG_never_compact) return;
   static const uint8_t kRawBytes[] = {0xC3, 0x7E, 0xA5, 0x5A};
   static const int kRawBytesSize = sizeof(kRawBytes);
-  static const int kFrameSize = 32;
-  static const int kParameterCount = 2;
+  static const int32_t kFrameSize = 32;
+  static const int32_t kParameterCount = 2;
   ManualGCScope manual_gc_scope;
   FLAG_manual_evacuation_candidates_selection = true;
@@ -666,8 +666,8 @@ TEST(BytecodeArray) {
 TEST(BytecodeArrayAging) {
   static const uint8_t kRawBytes[] = {0xC3, 0x7E, 0xA5, 0x5A};
   static const int kRawBytesSize = sizeof(kRawBytes);
-  static const int kFrameSize = 32;
-  static const int kParameterCount = 2;
+  static const int32_t kFrameSize = 32;
+  static const int32_t kParameterCount = 2;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();


@@ -306,7 +306,7 @@ void BytecodeExpectationsPrinter::PrintConstant(
 void BytecodeExpectationsPrinter::PrintFrameSize(
     std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
-  int frame_size = bytecode_array->frame_size();
+  int32_t frame_size = bytecode_array->frame_size();
   DCHECK(IsAligned(frame_size, kSystemPointerSize));
   stream << "frame size: " << frame_size / kSystemPointerSize