[cleanup] Make tq field names match C++ accessor names

I've noticed a few places where class fields as defined in Torque have
different names from the corresponding accessors in the C++ class. I
think they should match. Most of this change just mechanically updates
the various places that use k##Field##Offset for those fields.
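
For context, the k##Field##Offset constants take their names directly
from the field names, so renaming a Torque field also renames the
generated C++ offset constant. A minimal, self-contained sketch of that
naming relationship (not V8's actual Torque-generated macros; the
DECLARE_FIELD_OFFSET helper, the struct name, and the offset values are
made up for illustration):

// Illustrative only: shows how a k<Field>Offset name is pasted together
// from a field name, which is why renaming osr_nesting_level to
// osr_loop_nesting_level turns kOsrNestingLevelOffset into
// kOsrLoopNestingLevelOffset everywhere it is used.
#include <cstdio>

#define DECLARE_FIELD_OFFSET(Field, value) \
  static constexpr int k##Field##Offset = value;

struct BytecodeArrayLayoutSketch {
  DECLARE_FIELD_OFFSET(OsrLoopNestingLevel, 16)  // offset values are made up
  DECLARE_FIELD_OFFSET(BytecodeAge, 17)
};

int main() {
  std::printf("kOsrLoopNestingLevelOffset = %d\n",
              BytecodeArrayLayoutSketch::kOsrLoopNestingLevelOffset);
  std::printf("kBytecodeAgeOffset = %d\n",
              BytecodeArrayLayoutSketch::kBytecodeAgeOffset);
  return 0;
}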

Change-Id: I8ba52aed7f6a1cd6b2d71158f71150b66c2c0da0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3027263
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75796}
Authored by Seth Brenith <seth.brenith@microsoft.com> on 2021-07-19 12:18:05 -07:00; committed by V8 LUCI CQ
parent 0734909020
commit daa7abe3ea
21 changed files with 63 additions and 61 deletions


@@ -1868,7 +1868,7 @@ void BaselineCompiler::VisitJumpLoop() {
Register osr_level = scratch;
__ LoadRegister(osr_level, interpreter::Register::bytecode_array());
__ LoadByteField(osr_level, osr_level,
- BytecodeArray::kOsrNestingLevelOffset);
+ BytecodeArray::kOsrLoopNestingLevelOffset);
int loop_depth = iterator().GetImmediateOperand(1);
__ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth,
&osr_not_armed);


@@ -1113,14 +1113,15 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
- __ strh(scratch, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ __ strh(scratch,
+ FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
}
__ Push(argc, bytecodeArray);
@@ -1266,11 +1267,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r9, Operand(0));
__ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -3601,8 +3602,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
- __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ __ strh(scratch,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {


@@ -1297,10 +1297,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(bytecode_array,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecode_array);
@@ -1456,10 +1456,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
@@ -4130,7 +4130,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);


@@ -138,7 +138,7 @@ transitioning builtin ToObject(implicit context: Context)(input: JSAny):
}
case (o: JSAnyNotSmi): {
const index: intptr = Convert<intptr>(
- o.map.in_object_properties_start_or_constructor_function_index);
+ o.map.inobject_properties_start_or_constructor_function_index);
if (index != kNoConstructorFunctionIndex)
goto WrapPrimitive(
%RawDownCast<Slot<NativeContext, JSFunction>>(index));


@@ -1102,10 +1102,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
// Push bytecode array.
@@ -1725,10 +1725,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(
- FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
+ FieldOperand(bytecode_array, BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
__ Push(bytecode_array);
@@ -4236,7 +4236,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
Generate_OSREntry(masm, code_obj);
} else {


@@ -1085,10 +1085,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1243,10 +1243,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -4061,7 +4061,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {


@@ -1097,10 +1097,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1255,10 +1255,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3645,7 +3645,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {


@@ -1129,12 +1129,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ li(r8, Operand(0));
__ StoreU16(r8,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
r0);
// Load initial bytecode offset.


@@ -1149,10 +1149,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(bytecodeArray,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
__ Push(argc, bytecodeArray);
@@ -1315,10 +1315,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -3762,7 +3762,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset));
+ BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {


@@ -1174,12 +1174,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r1, Operand(0));
__ StoreU16(r1,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
r0);
// Load the initial bytecode offset.


@@ -1187,10 +1187,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
// Load initial bytecode offset.
@@ -1704,10 +1704,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// are 8-bit fields next to each other, so we could just optimize by
// writing a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
- __ movw(
- FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
+ __ movw(FieldOperand(bytecode_array,
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
__ Push(bytecode_array);
@@ -4498,7 +4498,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOsrNestingLevelOffset),
+ BytecodeArray::kOsrLoopNestingLevelOffset),
Immediate(0));
Generate_OSREntry(masm, code_obj);
} else {


@@ -1891,7 +1891,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
// See Map::GetInObjectPropertiesStartInWords() for details.
CSA_ASSERT(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
- map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
+ map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
@@ -1899,7 +1899,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
// See Map::GetConstructorFunctionIndex() for details.
CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
- map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
+ map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
TNode<Object> CodeStubAssembler::LoadMapConstructor(TNode<Map> map) {


@@ -1291,7 +1291,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
TNode<Int8T> InterpreterAssembler::LoadOsrNestingLevel() {
return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
- BytecodeArray::kOsrNestingLevelOffset);
+ BytecodeArray::kOsrLoopNestingLevelOffset);
}
void InterpreterAssembler::Abort(AbortReason abort_reason) {


@@ -915,13 +915,13 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
}
int BytecodeArray::osr_loop_nesting_level() const {
- return ReadField<int8_t>(kOsrNestingLevelOffset);
+ return ReadField<int8_t>(kOsrLoopNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WriteField<int8_t>(kOsrNestingLevelOffset, depth);
+ WriteField<int8_t>(kOsrLoopNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {


@@ -942,7 +942,7 @@ class BytecodeArray : public FixedArrayBase {
// InterpreterEntryTrampoline expects these fields to be next to each other
// and writes a 16-bit value to reset them.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- kOsrNestingLevelOffset + kCharSize);
+ kOsrLoopNestingLevelOffset + kCharSize);
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;


@@ -12,7 +12,7 @@ extern class BytecodeArray extends FixedArrayBase {
frame_size: int32;
parameter_size: int32;
incoming_new_target_or_generator_register: int32;
- osr_nesting_level: int8;
+ osr_loop_nesting_level: int8;
bytecode_age: int8;
}


@@ -9,7 +9,7 @@ extern class DataHandler extends Struct {
validity_cell: Smi|Cell;
// Space for the following fields may or may not be allocated.
- @noVerifier data_1: MaybeObject;
- @noVerifier data_2: MaybeObject;
- @noVerifier data_3: MaybeObject;
+ @noVerifier data1: MaybeObject;
+ @noVerifier data2: MaybeObject;
+ @noVerifier data3: MaybeObject;
}


@@ -290,14 +290,14 @@ int Map::inobject_properties_start_or_constructor_function_index() const {
// TODO(solanes, v8:7790, v8:11353): Make this and the setter non-atomic
// when TSAN sees the map's store synchronization.
return RELAXED_READ_BYTE_FIELD(
- *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+ *this, kInobjectPropertiesStartOrConstructorFunctionIndexOffset);
}
void Map::set_inobject_properties_start_or_constructor_function_index(
int value) {
CHECK_LT(static_cast<unsigned>(value), 256);
RELAXED_WRITE_BYTE_FIELD(
- *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ *this, kInobjectPropertiesStartOrConstructorFunctionIndexOffset,
static_cast<byte>(value));
}


@@ -53,7 +53,7 @@ extern class Map extends HeapObject {
}
instance_size_in_words: uint8;
- in_object_properties_start_or_constructor_function_index: uint8;
+ inobject_properties_start_or_constructor_function_index: uint8;
used_or_unused_instance_size_in_words: uint8;
visitor_id: uint8;
instance_type: InstanceType;


@@ -509,7 +509,7 @@ class AddInfoVisitor : public TqObjectVisitor {
// On JSObject instances, this value is the start of in-object properties.
// The constructor function index option is only for primitives.
auto start_offset =
- map.GetInObjectPropertiesStartOrConstructorFunctionIndexValue(
+ map.GetInobjectPropertiesStartOrConstructorFunctionIndexValue(
accessor_);
// The total size of the object in memory. This may include over-allocated


@@ -259,7 +259,7 @@ extras_accessors = [
'JSTypedArray, external_pointer, uintptr_t, kExternalPointerOffset',
'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_size_in_words, char, kInstanceSizeInWordsOffset',
- 'Map, inobject_properties_start_or_constructor_function_index, char, kInObjectPropertiesStartOrConstructorFunctionIndexOffset',
+ 'Map, inobject_properties_start_or_constructor_function_index, char, kInobjectPropertiesStartOrConstructorFunctionIndexOffset',
'Map, instance_type, uint16_t, kInstanceTypeOffset',
'Map, bit_field, char, kBitFieldOffset',
'Map, bit_field2, char, kBitField2Offset',