[torque] Verify nested struct fields in classes

As one of several steps involved in supporting struct-valued fields
within classes, this CL generates type verification code for the data
contained in those structs.

In order to generate verification code, Torque needs to know about
struct field offsets and the total size of structs. Those calculations
are added to StructType itself and to TypeVisitor::ComputeType, the
function that initializes the StructType. I repurposed the Field::offset value
to behave in structs more like it does in classes (it had previously
indicated the index of a field within a struct, but nobody used that
value). Overall this works okay, and I think it's less confusing to have
Field::offset mean the same thing everywhere. However, some struct
fields have types with unknown size (Field::GetFieldSizeInformation
fails), so those fields are now marked with offset Field::kInvalidOffset
to indicate that the structs containing them should not be used within
class fields or anywhere else that requires packed layout.
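
To make the offset bookkeeping concrete, here is an illustrative
computation (my example, not part of the CL) for the DescriptorEntry
struct introduced below, assuming a 64-bit build without pointer
compression:

  #include <cstddef>
  // Offsets fall out of summing the sizes of the preceding fields, as
  // TypeVisitor::ComputeType now does (all three fields are tagged).
  constexpr std::size_t kTaggedSize = 8;
  constexpr std::size_t kKeyOffset = 0;
  constexpr std::size_t kDetailsOffset = kKeyOffset + kTaggedSize;    // 8
  constexpr std::size_t kValueOffset = kDetailsOffset + kTaggedSize;  // 16
  constexpr std::size_t kPackedSize = kValueOffset + kTaggedSize;     // 24
  static_assert(kPackedSize == 3 * kTaggedSize, "entry packs to 24 bytes");

If any field's size were unknown, that field and every field after it
would carry Field::kInvalidOffset instead.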

Bug: v8:7793
Change-Id: If2677c8c81efc85e63b4bfb831d818a748427e18
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1897247
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65016}


@@ -569,13 +569,6 @@ void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*this, isolate);
for (int i = 0; i < number_of_all_descriptors(); i++) {
MaybeObjectSlot slot(GetDescriptorSlot(i));
MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryKeyIndex));
MaybeObject::VerifyMaybeObjectPointer(isolate,
*(slot + kEntryDetailsIndex));
MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryValueIndex));
}
if (number_of_all_descriptors() == 0) {
CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(), *this);
CHECK_EQ(0, number_of_all_descriptors());


@@ -155,7 +155,7 @@ class DescriptorArray : public HeapObject {
return OffsetOfDescriptorAt(number_of_all_descriptors);
}
static constexpr int OffsetOfDescriptorAt(int descriptor) {
return kHeaderSize + descriptor * kEntrySize * kTaggedSize;
return kDescriptorsOffset + descriptor * kEntrySize * kTaggedSize;
}
inline ObjectSlot GetFirstPointerSlot();
inline ObjectSlot GetDescriptorSlot(int descriptor);
@@ -164,6 +164,8 @@ class DescriptorArray : public HeapObject {
"Weak fields follow strong fields.");
static_assert(kEndOfWeakFieldsOffset == kHeaderSize,
"Weak fields extend up to the end of the header.");
static_assert(kDescriptorsOffset == kHeaderSize,
"Variable-size array follows header.");
// We use this visitor to also visit the enum_cache, which is the only
// tagged field in the header, and placed at the end of the header.
using BodyDescriptor = FlexibleWeakBodyDescriptor<kStartOfStrongFieldsOffset>;
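
As a sanity check on the new formula (my arithmetic; kDescriptorsOffset
is taken as 24 for illustration, the real value comes from the
Torque-generated header constants):

  // kEntrySize == 3: each DescriptorEntry is key, details, value.
  constexpr int kTaggedSize = 8;          // assuming a 64-bit build
  constexpr int kEntrySize = 3;
  constexpr int kDescriptorsOffset = 24;  // illustrative header size
  constexpr int OffsetOfDescriptorAt(int descriptor) {
    return kDescriptorsOffset + descriptor * kEntrySize * kTaggedSize;
  }
  static_assert(OffsetOfDescriptorAt(2) == kDescriptorsOffset + 48,
                "descriptor 2 starts 48 bytes past the header");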


@@ -9,8 +9,15 @@ extern class EnumCache extends Struct {
indices: FixedArray;
}
// We make this class abstract because it is missing the variable-sized part,
// which is still impossible to express in Torque.
@export
struct DescriptorEntry {
key: Name|Undefined;
details: Smi|Undefined;
value: JSAny|Weak<Map>|AccessorInfo|AccessorPair|ClassPositions;
}
// We make this class abstract because kHeaderSize is a more natural name for
// the part of the class before the variable-size array.
@abstract
@dirtyInstantiatedAbstractClass
extern class DescriptorArray extends HeapObject {
@@ -19,6 +26,5 @@ extern class DescriptorArray extends HeapObject {
raw_number_of_marked_descriptors: uint16;
filler16_bits: uint16;
enum_cache: EnumCache;
// DescriptorEntry needs to be a struct with three fields.
// descriptors : DescriptorEntry[number_of_all_descriptors]
descriptors[number_of_all_descriptors]: DescriptorEntry;
}


@@ -82,6 +82,7 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,
for (const Field& field : type.fields()) {
const Type* field_type = field.name_and_type.type;
if (field_type == TypeOracle::GetVoidType()) continue;
if (field_type->IsStructType()) continue; // Not yet supported.
const std::string& field_name = field.name_and_type.name;
bool is_field_tagged = field_type->IsSubtypeOf(TypeOracle::GetTaggedType());
base::Optional<const ClassType*> field_class_type =


@@ -2881,7 +2881,10 @@ class FieldOffsetsGenerator {
// Allow void type for marker constants of size zero.
return current_section_;
}
if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
// Currently struct-valued fields are only allowed to have tagged data; see
// TypeVisitor::VisitClassFieldsAndMethods.
if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
f.name_and_type.type->IsStructType()) {
if (f.is_weak) {
return FieldSectionType::kWeakSection;
} else {
@@ -3445,47 +3448,33 @@ void ImplementationVisitor::GeneratePrintDefinitions(
namespace {
void GenerateClassFieldVerifier(const std::string& class_name,
const ClassType& class_type, const Field& f,
std::ostream& h_contents,
// Generate verification code for a single piece of class data, which might be
// nested within a struct or might be a single element in an indexed field (or
// both).
void GenerateFieldValueVerifier(const std::string& class_name,
const Field& class_field,
const Field& leaf_field, size_t struct_offset,
std::string field_size,
std::ostream& cc_contents) {
if (!f.generate_verify) return;
const Type* field_type = f.name_and_type.type;
// We only verify tagged types, not raw numbers or pointers.
if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) return;
// Do not verify if the field may be uninitialized.
if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
if (f.index) {
const Type* index_type = f.index->type;
if (index_type != TypeOracle::GetSmiType()) {
Error("Expected type Smi for indexed field but found type ", *index_type)
.Position(f.pos);
}
// We already verified the index field because it was listed earlier, so we
// can assume it's safe to read here.
cc_contents << " for (int i = 0; i < TaggedField<Smi, " << class_name
<< "::k" << CamelifyString(f.index->name)
<< "Offset>::load(o).value(); ++i) {\n";
} else {
cc_contents << " {\n";
}
const Type* field_type = leaf_field.name_and_type.type;
bool maybe_object =
!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
!field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
const char* object_type = maybe_object ? "MaybeObject" : "Object";
const char* verify_fn =
maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
const char* index_offset = f.index ? "i * kTaggedSize" : "0";
std::string index_offset = std::to_string(struct_offset);
if (class_field.index) {
index_offset += " + i * " + field_size;
}
// Name the local var based on the field name for nicer CHECK output.
const std::string value = f.name_and_type.name + "__value";
const std::string value = leaf_field.name_and_type.name + "__value";
// Read the field.
cc_contents << " " << object_type << " " << value << " = TaggedField<"
<< object_type << ", " << class_name << "::k"
<< CamelifyString(f.name_and_type.name) << "Offset>::load(o, "
<< index_offset << ");\n";
<< CamelifyString(class_field.name_and_type.name)
<< "Offset>::load(o, " << index_offset << ");\n";
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
@@ -3494,7 +3483,7 @@ void GenerateClassFieldVerifier(const std::string& class_name,
// Check that the value is of an appropriate type. We can skip this part for
// the Object type because it would not check anything beyond what we already
// checked with VerifyPointer.
if (f.name_and_type.type != TypeOracle::GetObjectType()) {
if (field_type != TypeOracle::GetObjectType()) {
std::stringstream type_check;
bool at_start = true;
// If weak pointers are allowed, then start by checking for a cleared value.
@@ -3524,6 +3513,58 @@ void GenerateClassFieldVerifier(const std::string& class_name,
}
cc_contents << " CHECK(" << type_check.str() << ");\n";
}
}
void GenerateClassFieldVerifier(const std::string& class_name,
const ClassType& class_type, const Field& f,
std::ostream& h_contents,
std::ostream& cc_contents) {
if (!f.generate_verify) return;
const Type* field_type = f.name_and_type.type;
// We only verify tagged types, not raw numbers or pointers. Structs
// consisting of tagged types are also included.
if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
!field_type->IsStructType())
return;
// Do not verify if the field may be uninitialized.
if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
if (f.index) {
const Type* index_type = f.index->type;
std::string index_offset =
class_name + "::k" + CamelifyString(f.index->name) + "Offset";
cc_contents << " for (int i = 0; i < ";
if (index_type == TypeOracle::GetSmiType()) {
// We already verified the index field because it was listed earlier, so
// we can assume it's safe to read here.
cc_contents << "TaggedField<Smi, " << index_offset
<< ">::load(o).value()";
} else {
const Type* constexpr_version = index_type->ConstexprVersion();
if (constexpr_version == nullptr) {
Error("constexpr representation for type ", index_type->ToString(),
" is required due to usage as index")
.Position(f.pos);
}
cc_contents << "o.ReadField<" << constexpr_version->GetGeneratedTypeName()
<< ">(" << index_offset << ")";
}
cc_contents << "; ++i) {\n";
} else {
cc_contents << " {\n";
}
if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
for (const Field& field : struct_type->fields()) {
GenerateFieldValueVerifier(class_name, f, field, field.offset,
std::to_string(struct_type->PackedSize()),
cc_contents);
}
} else {
GenerateFieldValueVerifier(class_name, f, f, 0, "kTaggedSize", cc_contents);
}
cc_contents << " }\n";
}
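
For orientation, here is a sketch of the verifier body this generator
might emit for DescriptorArray's descriptors field (hypothetical output;
exact type names, offsets, and predicates are assumptions inferred from
the code above, with kTaggedSize == 8 so DescriptorEntry packs to 24
bytes):

  // number_of_all_descriptors is uint16, so the non-Smi branch emits a
  // ReadField for the loop bound.
  for (int i = 0; i < o.ReadField<uint16_t>(
           DescriptorArray::kNumberOfAllDescriptorsOffset); ++i) {
    Object key__value = TaggedField<Object,
        DescriptorArray::kDescriptorsOffset>::load(o, 0 + i * 24);
    Object::VerifyPointer(isolate, key__value);
    CHECK(key__value.IsName() || key__value.IsUndefined(isolate));
    // details__value and value__value follow at struct offsets 8 and 16;
    // value__value is a MaybeObject verified with VerifyMaybeObjectPointer
    // because the field type includes Weak<Map>.
  }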


@@ -138,6 +138,7 @@ const StructType* TypeVisitor::ComputeType(
CurrentSourcePosition::Scope position_activator(decl->pos);
size_t offset = 0;
bool packable = true;
for (auto& field : decl->fields) {
CurrentSourcePosition::Scope position_activator(
field.name_and_type.type->pos);
@@ -146,15 +147,36 @@ const StructType* TypeVisitor::ComputeType(
ReportError("struct field \"", field.name_and_type.name->value,
"\" carries constexpr type \"", *field_type, "\"");
}
struct_type->RegisterField({field.name_and_type.name->pos,
struct_type,
base::nullopt,
{field.name_and_type.name->value, field_type},
offset,
false,
field.const_qualified,
false});
offset += LoweredSlotCount(field_type);
Field f{field.name_and_type.name->pos,
struct_type,
base::nullopt,
{field.name_and_type.name->value, field_type},
offset,
false,
field.const_qualified,
false};
auto optional_size = f.GetOptionalFieldSizeInformation();
// Structs may contain fields that aren't representable in packed form. If
// so, then this field and any subsequent fields should have their offsets
// marked as invalid.
if (!optional_size.has_value()) {
packable = false;
}
if (!packable) {
f.offset = Field::kInvalidOffset;
}
struct_type->RegisterField(f);
// Offsets are assigned based on an assumption of no space between members.
// This might lead to invalid alignment in some cases, but most structs are
// never actually packed in memory together (they just represent a batch of
// CSA TNode values that should be passed around together). For any struct
// that is used as a class field, we verify its offsets when setting up the
// class type.
if (optional_size.has_value()) {
size_t field_size = 0;
std::tie(field_size, std::ignore) = *optional_size;
offset += field_size;
}
}
return struct_type;
}
@@ -298,6 +320,22 @@ void TypeVisitor::VisitClassFieldsAndMethods(
ReportError("non-extern classes do not support weak fields");
}
}
if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
for (const Field& struct_field : struct_type->fields()) {
if (!struct_field.name_and_type.type->IsSubtypeOf(
TypeOracle::GetTaggedType())) {
// If we ever actually need different sizes of struct fields, then we
// can define the packing and alignment rules. Until then, let's keep
// it simple. This restriction also helps keep the tagged and untagged
// regions separate in the class layout (see also
// FieldOffsetsGenerator::GetSectionFor).
Error(
"Classes do not support fields which are structs containing "
"untagged data.");
}
}
}
base::Optional<NameAndType> index_field;
if (field_expression.index) {
if (seen_indexed_field ||
(super_class && super_class->HasIndexedField())) {
@@ -305,18 +343,8 @@
"only one indexable field is currently supported per class");
}
seen_indexed_field = true;
const NameAndType& index_field =
class_type->LookupFieldInternal(*field_expression.index)
.name_and_type;
class_type->RegisterField(
{field_expression.name_and_type.name->pos,
class_type,
index_field,
{field_expression.name_and_type.name->value, field_type},
class_offset,
field_expression.weak,
field_expression.const_qualified,
field_expression.generate_verify});
index_field = class_type->LookupFieldInternal(*field_expression.index)
.name_and_type;
} else {
if (seen_indexed_field) {
ReportError("cannot declare non-indexable field \"",
@@ -324,27 +352,26 @@
"\" after an indexable field "
"declaration");
}
const Field& field = class_type->RegisterField(
{field_expression.name_and_type.name->pos,
class_type,
base::nullopt,
{field_expression.name_and_type.name->value, field_type},
class_offset,
field_expression.weak,
field_expression.const_qualified,
field_expression.generate_verify});
size_t field_size;
std::string size_string;
std::string machine_type;
std::tie(field_size, size_string) = field.GetFieldSizeInformation();
// Our allocations don't support alignments beyond kTaggedSize.
size_t alignment = std::min(
static_cast<size_t>(TargetArchitecture::TaggedSize()), field_size);
if (alignment > 0 && class_offset % alignment != 0) {
ReportError("field ", field_expression.name_and_type.name,
" at offset ", class_offset, " is not ", alignment,
"-byte aligned.");
}
}
const Field& field = class_type->RegisterField(
{field_expression.name_and_type.name->pos,
class_type,
index_field,
{field_expression.name_and_type.name->value, field_type},
class_offset,
field_expression.weak,
field_expression.const_qualified,
field_expression.generate_verify});
size_t field_size;
std::tie(field_size, std::ignore) = field.GetFieldSizeInformation();
// Our allocations don't support alignments beyond kTaggedSize.
size_t alignment = std::min(
static_cast<size_t>(TargetArchitecture::TaggedSize()), field_size);
if (alignment > 0 && class_offset % alignment != 0) {
ReportError("field ", field_expression.name_and_type.name, " at offset ",
class_offset, " is not ", alignment, "-byte aligned.");
}
if (!field_expression.index) {
class_offset += field_size;
}
}
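
The packable-marking logic can be summarized in isolation (a minimal
standalone sketch under assumed simplifications, not code from this CL):

  #include <cstddef>
  #include <cstdint>
  #include <optional>
  #include <vector>

  constexpr std::size_t kInvalidOffset = SIZE_MAX;
  struct FieldInfo {
    std::optional<std::size_t> size;  // empty when the size is unknown
    std::size_t offset = 0;
  };
  // Once any field's size is unknown, it and every later field receive
  // kInvalidOffset; known sizes still advance the running offset.
  void AssignOffsets(std::vector<FieldInfo>& fields) {
    std::size_t offset = 0;
    bool packable = true;
    for (FieldInfo& f : fields) {
      if (!f.size) packable = false;
      f.offset = packable ? offset : kInvalidOffset;
      if (f.size) offset += *f.size;
    }
  }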


@@ -336,6 +336,29 @@ std::string StructType::GetGeneratedTypeNameImpl() const {
return generated_type_name_;
}
size_t StructType::PackedSize() const {
size_t result = 0;
if (!fields_.empty()) {
const Field& last = fields_.back();
if (last.offset == Field::kInvalidOffset) {
// This struct can't be packed. Find the first invalid field and use its
// name and position for the error.
for (const Field& field : fields_) {
if (field.offset == Field::kInvalidOffset) {
Error("Cannot compute packed size of ", ToString(), " due to field ",
field.name_and_type.name, " of unknown size")
.Position(field.pos);
return 0;
}
}
}
size_t field_size = 0;
std::tie(field_size, std::ignore) = last.GetFieldSizeInformation();
result = last.offset + field_size;
}
return result;
}
// static
std::string Type::ComputeName(const std::string& basename,
MaybeSpecializationKey specialized_from) {
@@ -695,7 +718,18 @@ VisitResult VisitResult::NeverResult() {
}
std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
std::string size_string = "#no size";
auto optional = GetOptionalFieldSizeInformation();
if (optional.has_value()) {
return *optional;
}
Error("fields of type ", *name_and_type.type, " are not (yet) supported")
.Position(pos);
return std::make_tuple(0, "#no size");
}
base::Optional<std::tuple<size_t, std::string>>
Field::GetOptionalFieldSizeInformation() const {
std::string size_string;
const Type* field_type = this->name_and_type.type;
size_t field_size = 0;
if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
@@ -734,8 +768,12 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
} else if (field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
field_size = TargetArchitecture::RawPtrSize();
size_string = "kIntptrSize";
} else if (const StructType* struct_type =
StructType::DynamicCast(field_type)) {
field_size = struct_type->PackedSize();
size_string = std::to_string(field_size);
} else {
ReportError("fields of type ", *field_type, " are not (yet) supported");
return {};
}
return std::make_tuple(field_size, size_string);
}


@@ -183,14 +183,28 @@ struct Field {
// reliance on string types is quite clunky.
std::tuple<size_t, std::string> GetFieldSizeInformation() const;
// Like GetFieldSizeInformation, but rather than raising an error if the
// field's size is unknown, it returns no value.
base::Optional<std::tuple<size_t, std::string>>
GetOptionalFieldSizeInformation() const;
SourcePosition pos;
const AggregateType* aggregate;
base::Optional<NameAndType> index;
NameAndType name_and_type;
// The byte offset of this field from the beginning of the containing class or
// struct. Most structs are never packed together in memory, and are only used
// to hold a batch of related CSA TNode values, in which case |offset| is
// irrelevant. In structs, this value can be set to kInvalidOffset to indicate
// that the struct should never be used in packed form.
size_t offset;
bool is_weak;
bool const_qualified;
bool generate_verify;
static constexpr size_t kInvalidOffset = SIZE_MAX;
};
std::ostream& operator<<(std::ostream& os, const Field& name_and_type);
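
// Illustration (not part of this CL): a caller could detect an unpackable
// struct by checking the last field's offset, the same condition
// StructType::PackedSize uses:
//   bool IsPackable(const std::vector<Field>& fields) {
//     return fields.empty() ||
//            fields.back().offset != Field::kInvalidOffset;
//   }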
@@ -503,6 +517,9 @@ class StructType final : public AggregateType {
std::string GetGeneratedTypeNameImpl() const override;
// Returns the sum of the sizes of all members. Does not validate alignment.
size_t PackedSize() const;
private:
friend class TypeOracle;
StructType(Namespace* nspace, const StructDeclaration* decl,