[torque] Add source positions for generated instance types
Change-Id: I13276e389fa71fb3de2ab3f7b685b021418acb1e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3211895
Auto-Submit: Camillo Bruni <cbruni@chromium.org>
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77335}
This commit is contained in: parent 8c598ace9e, commit 05056b27a8
@@ -327,8 +327,9 @@ void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
   }
   if (root->num_own_values == 1) {
     definitions << inner_indent << "V(" << type_name << ", " << root->value
-                << ") \\\n";
-    values << " V(" << type_name << ") \\\n";
+                << ") /* " << root->type->GetPosition() << " */\\\n";
+    values << " V(" << type_name << ") /* " << root->type->GetPosition()
+           << " */\\\n";
     std::ostream& type_checker_list =
         root->type->HasUndefinedLayout()
             ? (root->num_values == 1 ? only_declared_single_instance_types
@@ -336,7 +337,7 @@ void PrintInstanceTypes(InstanceTypeTree* root, std::ostream& definitions,
             : (root->num_values == 1 ? fully_defined_single_instance_types
                                      : fully_defined_multiple_instance_types);
     type_checker_list << " V(" << root->type->name() << ", " << type_name
-                      << ") \\\n";
+                      << ") /* " << root->type->GetPosition() << " */ \\\n";
   }
   for (auto& child : root->children) {
     PrintInstanceTypes(child.get(), definitions, values,
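With this change, each entry of the generated instance-type macro lists carries a trailing /* <source position> */ comment, so tools that parse those lists have to tolerate it; that is what the changes to the postmortem metadata generator (tools/gen-postmortem-metadata.py) below are about. A minimal sketch of the regex difference visible in that diff (the entry string here is made up for illustration):

import re

old_pattern = r' *V\(|\) *'   # strips 'V(' and the closing ') ' only
new_pattern = r' *V\(|\).*'   # strips 'V(' and everything from ')' onwards

entry = 'V(JS_ARRAY_TYPE) /* hypothetical/source.tq:12 */'
print(re.sub(old_pattern, '', entry))  # 'JS_ARRAY_TYPE/* hypothetical/source.tq:12 */'
print(re.sub(new_pattern, '', entry))  # 'JS_ARRAY_TYPE'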
@@ -59,37 +59,37 @@ import sys
#
consts_misc = [
    { 'name': 'FirstNonstringType', 'value': 'FIRST_NONSTRING_TYPE' },
    { 'name': 'APIObjectType', 'value': 'JS_API_OBJECT_TYPE' },
    { 'name': 'SpecialAPIObjectType', 'value': 'JS_SPECIAL_API_OBJECT_TYPE' },

    { 'name': 'FirstContextType', 'value': 'FIRST_CONTEXT_TYPE' },
    { 'name': 'LastContextType', 'value': 'LAST_CONTEXT_TYPE' },

    { 'name': 'IsNotStringMask', 'value': 'kIsNotStringMask' },
    { 'name': 'StringTag', 'value': 'kStringTag' },

    { 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
    { 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
    { 'name': 'OneByteStringTag', 'value': 'kOneByteStringTag' },

    { 'name': 'StringRepresentationMask',
          'value': 'kStringRepresentationMask' },
    { 'name': 'SeqStringTag', 'value': 'kSeqStringTag' },
    { 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
    { 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
    { 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
    { 'name': 'ThinStringTag', 'value': 'kThinStringTag' },

    { 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
    { 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
    { 'name': 'SmiTag', 'value': 'kSmiTag' },
    { 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
    { 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
    { 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
    { 'name': 'SystemPointerSize', 'value': 'kSystemPointerSize' },
    { 'name': 'SystemPointerSizeLog2', 'value': 'kSystemPointerSizeLog2' },
    { 'name': 'TaggedSize', 'value': 'kTaggedSize' },
    { 'name': 'TaggedSizeLog2', 'value': 'kTaggedSizeLog2' },

    { 'name': 'CodeKindFieldMask', 'value': 'Code::KindField::kMask' },
    { 'name': 'CodeKindFieldShift', 'value': 'Code::KindField::kShift' },
@@ -101,18 +101,18 @@ consts_misc = [
    { 'name': 'CodeKindBaseline',
          'value': 'static_cast<int>(CodeKind::BASELINE)' },

    { 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
    { 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
    { 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
    { 'name': 'OddballNull', 'value': 'Oddball::kNull' },
    { 'name': 'OddballArgumentsMarker', 'value': 'Oddball::kArgumentsMarker' },
    { 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' },
    { 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' },
    { 'name': 'OddballOther', 'value': 'Oddball::kOther' },
    { 'name': 'OddballException', 'value': 'Oddball::kException' },

    { 'name': 'ContextRegister', 'value': 'kContextRegister.code()' },
    { 'name': 'ReturnRegister0', 'value': 'kReturnRegister0.code()' },
    { 'name': 'JSFunctionRegister', 'value': 'kJSFunctionRegister.code()' },
    { 'name': 'InterpreterBytecodeOffsetRegister',
          'value': 'kInterpreterBytecodeOffsetRegister.code()' },
@@ -122,129 +122,129 @@ consts_misc = [
          'value': 'kRuntimeCallFunctionRegister.code()' },

    { 'name': 'prop_kind_Data',
          'value': 'kData' },
    { 'name': 'prop_kind_Accessor',
          'value': 'kAccessor' },
    { 'name': 'prop_kind_mask',
          'value': 'PropertyDetails::KindField::kMask' },
    { 'name': 'prop_location_Descriptor',
          'value': 'static_cast<int>(PropertyLocation::kDescriptor)' },
    { 'name': 'prop_location_Field',
          'value': 'static_cast<int>(PropertyLocation::kField)' },
    { 'name': 'prop_location_mask',
          'value': 'PropertyDetails::LocationField::kMask' },
    { 'name': 'prop_location_shift',
          'value': 'PropertyDetails::LocationField::kShift' },
    { 'name': 'prop_attributes_NONE', 'value': 'NONE' },
    { 'name': 'prop_attributes_READ_ONLY', 'value': 'READ_ONLY' },
    { 'name': 'prop_attributes_DONT_ENUM', 'value': 'DONT_ENUM' },
    { 'name': 'prop_attributes_DONT_DELETE', 'value': 'DONT_DELETE' },
    { 'name': 'prop_attributes_mask',
          'value': 'PropertyDetails::AttributesField::kMask' },
    { 'name': 'prop_attributes_shift',
          'value': 'PropertyDetails::AttributesField::kShift' },
    { 'name': 'prop_index_mask',
          'value': 'PropertyDetails::FieldIndexField::kMask' },
    { 'name': 'prop_index_shift',
          'value': 'PropertyDetails::FieldIndexField::kShift' },
    { 'name': 'prop_representation_mask',
          'value': 'PropertyDetails::RepresentationField::kMask' },
    { 'name': 'prop_representation_shift',
          'value': 'PropertyDetails::RepresentationField::kShift' },
    { 'name': 'prop_representation_smi',
          'value': 'Representation::Kind::kSmi' },
    { 'name': 'prop_representation_double',
          'value': 'Representation::Kind::kDouble' },
    { 'name': 'prop_representation_heapobject',
          'value': 'Representation::Kind::kHeapObject' },
    { 'name': 'prop_representation_tagged',
          'value': 'Representation::Kind::kTagged' },

    { 'name': 'prop_desc_key',
          'value': 'DescriptorArray::kEntryKeyIndex' },
    { 'name': 'prop_desc_details',
          'value': 'DescriptorArray::kEntryDetailsIndex' },
    { 'name': 'prop_desc_value',
          'value': 'DescriptorArray::kEntryValueIndex' },
    { 'name': 'prop_desc_size',
          'value': 'DescriptorArray::kEntrySize' },

    { 'name': 'elements_fast_holey_elements',
          'value': 'HOLEY_ELEMENTS' },
    { 'name': 'elements_fast_elements',
          'value': 'PACKED_ELEMENTS' },
    { 'name': 'elements_dictionary_elements',
          'value': 'DICTIONARY_ELEMENTS' },

    { 'name': 'bit_field2_elements_kind_mask',
          'value': 'Map::Bits2::ElementsKindBits::kMask' },
    { 'name': 'bit_field2_elements_kind_shift',
          'value': 'Map::Bits2::ElementsKindBits::kShift' },
    { 'name': 'bit_field3_is_dictionary_map_shift',
          'value': 'Map::Bits3::IsDictionaryMapBit::kShift' },
    { 'name': 'bit_field3_number_of_own_descriptors_mask',
          'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kMask' },
    { 'name': 'bit_field3_number_of_own_descriptors_shift',
          'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kShift' },
    { 'name': 'class_Map__instance_descriptors_offset',
          'value': 'Map::kInstanceDescriptorsOffset' },

    { 'name': 'off_fp_context_or_frame_type',
          'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
    { 'name': 'off_fp_context',
          'value': 'StandardFrameConstants::kContextOffset' },
    { 'name': 'off_fp_constant_pool',
          'value': 'StandardFrameConstants::kConstantPoolOffset' },
    { 'name': 'off_fp_function',
          'value': 'StandardFrameConstants::kFunctionOffset' },
    { 'name': 'off_fp_args',
          'value': 'StandardFrameConstants::kFixedFrameSizeAboveFp' },
    { 'name': 'off_fp_bytecode_array',
          'value': 'UnoptimizedFrameConstants::kBytecodeArrayFromFp' },
    { 'name': 'off_fp_bytecode_offset',
          'value': 'UnoptimizedFrameConstants::kBytecodeOffsetOrFeedbackVectorFromFp' },

    { 'name': 'scopeinfo_idx_nparams',
          'value': 'ScopeInfo::kParameterCount' },
    { 'name': 'scopeinfo_idx_ncontextlocals',
          'value': 'ScopeInfo::kContextLocalCount' },
    { 'name': 'scopeinfo_idx_first_vars',
          'value': 'ScopeInfo::kVariablePartIndex' },

    { 'name': 'jsarray_buffer_was_detached_mask',
          'value': 'JSArrayBuffer::WasDetachedBit::kMask' },
    { 'name': 'jsarray_buffer_was_detached_shift',
          'value': 'JSArrayBuffer::WasDetachedBit::kShift' },

    { 'name': 'context_idx_scope_info',
          'value': 'Context::SCOPE_INFO_INDEX' },
    { 'name': 'context_idx_prev',
          'value': 'Context::PREVIOUS_INDEX' },
    { 'name': 'context_min_slots',
          'value': 'Context::MIN_CONTEXT_SLOTS' },
    { 'name': 'native_context_embedder_data_offset',
          'value': 'Internals::kNativeContextEmbedderDataOffset' },

    { 'name': 'namedictionaryshape_prefix_size',
          'value': 'NameDictionaryShape::kPrefixSize' },
    { 'name': 'namedictionaryshape_entry_size',
          'value': 'NameDictionaryShape::kEntrySize' },
    { 'name': 'globaldictionaryshape_entry_size',
          'value': 'GlobalDictionaryShape::kEntrySize' },

    { 'name': 'namedictionary_prefix_start_index',
          'value': 'NameDictionary::kPrefixStartIndex' },

    { 'name': 'numberdictionaryshape_prefix_size',
          'value': 'NumberDictionaryShape::kPrefixSize' },
    { 'name': 'numberdictionaryshape_entry_size',
          'value': 'NumberDictionaryShape::kEntrySize' },

    { 'name': 'simplenumberdictionaryshape_prefix_size',
          'value': 'SimpleNumberDictionaryShape::kPrefixSize' },
    { 'name': 'simplenumberdictionaryshape_entry_size',
          'value': 'SimpleNumberDictionaryShape::kEntrySize' },

    { 'name': 'type_JSError__JS_ERROR_TYPE', 'value': 'JS_ERROR_TYPE' },
];
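Each consts_misc entry above is eventually turned into one exported integer by emit_set() further down, using the 'V8_EXPORT int v8dbg_%s = %s;' format string shown there. A small illustration with one entry from this list (the resolved numeric value naturally depends on the build):

const = { 'name': 'StringTag', 'value': 'kStringTag' }
line = 'V8_EXPORT int v8dbg_%s = %s;\n' % (const['name'], const['value'])
# line == 'V8_EXPORT int v8dbg_StringTag = kStringTag;\n'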
@@ -326,10 +326,10 @@ expected_classes = [
# The following structures store high-level representations of the structures
# for which we're going to emit descriptive constants.
#
types = {};        # set of all type names
typeclasses = {};  # maps type names to corresponding class names
klasses = {};      # known classes, including parents
fields = [];       # field declarations

header = '''
/*
@@ -372,393 +372,395 @@ footer = '''
# Get the base class
#
def get_base_class(klass):
    if (klass == 'Object'):
        return klass;

    if (not (klass in klasses)):
        return None;

    k = klasses[klass];

    return get_base_class(k['parent']);

#
# Loads class hierarchy and type information from "objects.h" etc.
#
def load_objects():
    #
    # Construct a dictionary for the classes we're sure should be present.
    #
    checktypes = {};
    for klass in expected_classes:
        checktypes[klass] = True;

    for filename in sys.argv[2:]:
        if not filename.endswith("-inl.h"):
            load_objects_from_file(filename, checktypes)

    if (len(checktypes) > 0):
        for klass in checktypes:
            print('error: expected class \"%s\" not found' % klass);

        sys.exit(1);

def load_objects_from_file(objfilename, checktypes):
    objfile = io.open(objfilename, 'r', encoding='utf-8');
    in_insttype = False;
    in_torque_insttype = False
    in_torque_fulldef = False

    typestr = '';
    torque_typestr = ''
    torque_fulldefstr = ''
    uncommented_file = ''

    #
    # Iterate the header file line-by-line to collect type and class
    # information. For types, we accumulate a string representing the entire
    # InstanceType enum definition and parse it later because it's easier to
    # do so without the embedded newlines.
    #
    for line in objfile:
        if (line.startswith('enum InstanceType : uint16_t {')):
            in_insttype = True;
            continue;

        if (line.startswith('#define TORQUE_ASSIGNED_INSTANCE_TYPE_LIST')):
            in_torque_insttype = True
            continue

        if (line.startswith('#define TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED')):
            in_torque_fulldef = True
            continue

        if (in_insttype and line.startswith('};')):
            in_insttype = False;
            continue;

        if (in_torque_insttype and (not line or line.isspace())):
            in_torque_insttype = False
            continue

        if (in_torque_fulldef and (not line or line.isspace())):
            in_torque_fulldef = False
            continue

        line = re.sub('//.*', '', line.strip());

        if (in_insttype):
            typestr += line;
            continue;

        if (in_torque_insttype):
            torque_typestr += line
            continue

        if (in_torque_fulldef):
            torque_fulldefstr += line
            continue

        uncommented_file += '\n' + line

    for match in re.finditer(r'\nclass(?:\s+V8_EXPORT(?:_PRIVATE)?)?'
                             r'\s+(\w[^:;]*)'
                             r'(?:: public (\w[^{]*))?\s*{\s*',
                             uncommented_file):
        klass = match.group(1).strip();
        pklass = match.group(2);
        if (pklass):
            # Check for generated Torque class.
            gen_match = re.match(
                r'TorqueGenerated\w+\s*<\s*\w+,\s*(\w+)\s*>',
                pklass)
            if (gen_match):
                pklass = gen_match.group(1)
            # Strip potential template arguments from parent
            # class.
            match = re.match(r'(\w+)(<.*>)?', pklass.strip());
            pklass = match.group(1).strip();
        klasses[klass] = { 'parent': pklass };

    #
    # Process the instance type declaration.
    #
    entries = typestr.split(',');
    for entry in entries:
        types[re.sub('\s*=.*', '', entry).lstrip()] = True;
    entries = torque_typestr.split('\\')
    for entry in entries:
-       types[re.sub(r' *V\(|\) *', '', entry)] = True
+       name = re.sub(r' *V\(|\).*', '', entry)
+       types[name] = True
    entries = torque_fulldefstr.split('\\')
    for entry in entries:
        entry = entry.strip()
        if not entry:
            continue
-       idx = entry.find('(');
-       rest = entry[idx + 1: len(entry) - 1];
+       start = entry.find('(');
+       end = entry.find(')', start);
+       rest = entry[start + 1: end];
        args = re.split('\s*,\s*', rest);
        typename = args[0]
        typeconst = args[1]
        types[typeconst] = True
        typeclasses[typeconst] = typename

    #
    # Infer class names for each type based on a systematic transformation.
    # For example, "JS_FUNCTION_TYPE" becomes "JSFunction". We find the
    # class for each type rather than the other way around because there are
    # fewer cases where one type maps to more than one class than the other
    # way around.
    #
    for type in types:
        usetype = type

        #
        # Remove the "_TYPE" suffix and then convert to camel case,
        # except that a "JS" prefix remains uppercase (as in
        # "JS_FUNCTION_TYPE" => "JSFunction").
        #
        if (not usetype.endswith('_TYPE')):
            continue;

        usetype = usetype[0:len(usetype) - len('_TYPE')];
        parts = usetype.split('_');
        cctype = '';

        if (parts[0] == 'JS'):
            cctype = 'JS';
            start = 1;
        else:
            cctype = '';
            start = 0;

        for ii in range(start, len(parts)):
            part = parts[ii];
            cctype += part[0].upper() + part[1:].lower();

        #
        # Mapping string types is more complicated. Both types and
        # class names for Strings specify a representation (e.g., Seq,
        # Cons, External, or Sliced) and an encoding (TwoByte/OneByte),
        # In the simplest case, both of these are explicit in both
        # names, as in:
        #
        #       EXTERNAL_ONE_BYTE_STRING_TYPE => ExternalOneByteString
        #
        # However, either the representation or encoding can be omitted
        # from the type name, in which case "Seq" and "TwoByte" are
        # assumed, as in:
        #
        #       STRING_TYPE => SeqTwoByteString
        #
        # Additionally, sometimes the type name has more information
        # than the class, as in:
        #
        #       CONS_ONE_BYTE_STRING_TYPE => ConsString
        #
        # To figure this out dynamically, we first check for a
        # representation and encoding and add them if they're not
        # present. If that doesn't yield a valid class name, then we
        # strip out the representation.
        #
        if (cctype.endswith('String')):
            if (cctype.find('Cons') == -1 and
                cctype.find('External') == -1 and
                cctype.find('Sliced') == -1):
                if (cctype.find('OneByte') != -1):
                    cctype = re.sub('OneByteString$',
                        'SeqOneByteString', cctype);
                else:
                    cctype = re.sub('String$',
                        'SeqString', cctype);

            if (cctype.find('OneByte') == -1):
                cctype = re.sub('String$', 'TwoByteString',
                    cctype);

            if (not (cctype in klasses)):
                cctype = re.sub('OneByte', '', cctype);
                cctype = re.sub('TwoByte', '', cctype);

        #
        # Despite all that, some types have no corresponding class.
        #
        if (cctype in klasses):
            typeclasses[type] = cctype;
            if (cctype in checktypes):
                del checktypes[cctype];

#
# For a given macro call, pick apart the arguments and return an object
# describing the corresponding output constant. See load_fields().
#
def parse_field(call):
    # Replace newlines with spaces.
    for ii in range(0, len(call)):
        if (call[ii] == '\n'):
            call[ii] == ' ';

    idx = call.find('(');
    kind = call[0:idx];
    rest = call[idx + 1: len(call) - 1];
    args = re.split('\s*,\s*', rest);

    consts = [];

    klass = args[0];
    field = args[1];
    dtype = None
    offset = None
    if kind.startswith('WEAK_ACCESSORS'):
        dtype = 'weak'
        offset = args[2];
    elif not (kind.startswith('SMI_ACCESSORS') or kind.startswith('ACCESSORS_TO_SMI')):
        dtype = args[2].replace('<', '_').replace('>', '_')
        offset = args[3];
    else:
        offset = args[2];
        dtype = 'SMI'

    assert(offset is not None and dtype is not None);
    return ({
        'name': 'class_%s__%s__%s' % (klass, field, dtype),
        'value': '%s::%s' % (klass, offset)
    });
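For reference, a small worked example of parse_field() as defined above; the ACCESSORS macro text is illustrative (its shape matches what load_fields_from_file() collects, but this exact invocation is not taken from the diff):

parse_field('ACCESSORS(JSFunction, shared_function_info, '
            'SharedFunctionInfo, kSharedFunctionInfoOffset)')
# kind == 'ACCESSORS', so dtype comes from args[2] and the offset from args[3]:
# -> { 'name': 'class_JSFunction__shared_function_info__SharedFunctionInfo',
#      'value': 'JSFunction::kSharedFunctionInfoOffset' }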

#
# Load field offset information from objects-inl.h etc.
#
def load_fields():
    for filename in sys.argv[2:]:
        if filename.endswith("-inl.h"):
            load_fields_from_file(filename)

    for body in extras_accessors:
        fields.append(parse_field('ACCESSORS(%s)' % body));


def load_fields_from_file(filename):
    inlfile = io.open(filename, 'r', encoding='utf-8');

    #
    # Each class's fields and the corresponding offsets are described in the
    # source by calls to macros like "ACCESSORS" (and friends). All we do
    # here is extract these macro invocations, taking into account that they
    # may span multiple lines and may contain nested parentheses. We also
    # call parse_field() to pick apart the invocation.
    #
    prefixes = [ 'ACCESSORS', 'ACCESSORS2', 'ACCESSORS_GCSAFE',
                 'SMI_ACCESSORS', 'ACCESSORS_TO_SMI',
                 'RELEASE_ACQUIRE_ACCESSORS', 'WEAK_ACCESSORS' ];
    prefixes += ([ prefix + "_CHECKED" for prefix in prefixes ] +
                 [ prefix + "_CHECKED2" for prefix in prefixes ])

    current = '';
    opens = 0;

    for line in inlfile:
        if (opens > 0):
            # Continuation line
            for ii in range(0, len(line)):
                if (line[ii] == '('):
                    opens += 1;
                elif (line[ii] == ')'):
                    opens -= 1;

                if (opens == 0):
                    break;

            current += line[0:ii + 1];
            continue;

        for prefix in prefixes:
            if (not line.startswith(prefix + '(')):
                continue;

            if (len(current) > 0):
                fields.append(parse_field(current));
                current = '';

            for ii in range(len(prefix), len(line)):
                if (line[ii] == '('):
                    opens += 1;
                elif (line[ii] == ')'):
                    opens -= 1;

                if (opens == 0):
                    break;

            current += line[0:ii + 1];

    if (len(current) > 0):
        fields.append(parse_field(current));
        current = '';

#
# Emit a block of constants.
#
def emit_set(out, consts):
    lines = set()  # To remove duplicates.

    # Fix up overzealous parses. This could be done inside the
    # parsers but as there are several, it's easiest to do it here.
    ws = re.compile('\s+')
    for const in consts:
        name = ws.sub('', const['name'])
        value = ws.sub('', str(const['value']))  # Can be a number.
        lines.add('V8_EXPORT int v8dbg_%s = %s;\n' % (name, value))

    for line in lines:
        out.write(line);
    out.write('\n');

#
# Emit the whole output file.
#
def emit_config():
    out = open(sys.argv[1], 'w');

    out.write(header);

    out.write('/* miscellaneous constants */\n');
    emit_set(out, consts_misc);

    out.write('/* class type information */\n');
    consts = [];
    for typename in sorted(typeclasses):
        klass = typeclasses[typename];
        consts.append({
            'name': 'type_%s__%s' % (klass, typename),
            'value': typename
        });

    emit_set(out, consts);

    out.write('/* class hierarchy information */\n');
    consts = [];
    for klassname in sorted(klasses):
        pklass = klasses[klassname]['parent'];
        bklass = get_base_class(klassname);
        if (bklass != 'Object'):
            continue;
        if (pklass == None):
            continue;

        consts.append({
            'name': 'parent_%s__%s' % (klassname, pklass),
            'value': 0
        });

    emit_set(out, consts);

    out.write('/* field information */\n');
    emit_set(out, fields);

    out.write(footer);

if (len(sys.argv) < 4):
    print('usage: %s output.cc objects.h objects-inl.h' % sys.argv[0]);
    sys.exit(2);

load_objects();
load_fields();
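To close the loop with the Torque change at the top: a sketch of how the new find-based slicing in load_objects_from_file() copes with an instance-type checker entry that now carries a source-position comment (the path in the sample entry is made up):

import re

entry = 'V(JSArray, JS_ARRAY_TYPE) /* some/file.tq 123:7 */'
start = entry.find('(')
end = entry.find(')', start)
rest = entry[start + 1: end]              # 'JSArray, JS_ARRAY_TYPE'
typename, typeconst = re.split(r'\s*,\s*', rest)
# typename == 'JSArray', typeconst == 'JS_ARRAY_TYPE'; the old
# entry[idx + 1: len(entry) - 1] slice would have dragged the trailing
# comment into the second argument.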