[torque] multiple arrays in one object
This allows the definition of classes with several arrays and ports the
SmallOrderedHashTable subclasses to Torque as an example, including the
existing CSA allocation functions for them.

Overview of changes:
- Introduce ResidueClass to encapsulate the modulo arithmetic needed for
  alignment checks.
- Add MachineOperatorReducer to the CSA pipeline to replace the ad-hoc
  constant folding CSA used to do, which is now blocked by a temporary phi.
- Allow assignments to references to structs. This is needed to initialize
  the data_table part of SmallOrderedHashMap.
- Make the NumberLiteralExpression AST node store a double instead of a
  string. This is necessary to detect constant-sized arrays used for padding.
- Turn offsets into base::Optional<size_t> to ensure we don't use an invalid
  or statically unknown offset.
- Remove CreateFieldReferenceInstruction, since it doesn't work for complex
  offset computations and the logic can be expressed better in
  ImplementationVisitor.
- Validate the alignment of structs embedded in classes.

Bug: v8:10004 v8:7793
Change-Id: Ifa414b42278e572a0c577bf9da3d37f80771a258
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1958011
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65538}
Parent: d2528de478
Commit: 59e8d45ad8
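Since ResidueClass is the backbone of the new alignment checks below, here is a minimal C++ sketch of the modulo-arithmetic idea. The class name matches the commit, but this simplified interface is an assumption for illustration, not V8's actual implementation:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <optional>

    // Hypothetical, simplified ResidueClass: a value known only modulo a
    // power of two.
    class ResidueClass {
     public:
      // kMaxModulusLog2 encodes "known exactly".
      static constexpr size_t kMaxModulusLog2 = 64;

      ResidueClass(size_t value) : ResidueClass(value, kMaxModulusLog2) {}
      static ResidueClass Unknown() { return ResidueClass(0, 0); }
      static ResidueClass KnownModulo(size_t value, size_t modulus_log_2) {
        return ResidueClass(value, modulus_log_2);
      }

      ResidueClass operator+(const ResidueClass& other) const {
        // The sum is only known modulo the smaller of the two moduli.
        size_t m = std::min(modulus_log_2_, other.modulus_log_2_);
        return ResidueClass(value_ + other.value_, m);
      }

      // The exact value, if known.
      std::optional<size_t> SingleValue() const {
        if (modulus_log_2_ == kMaxModulusLog2) return value_;
        return std::nullopt;
      }

      // Log2 of the largest power-of-two alignment the value is guaranteed
      // to have: trailing zero bits of value_, capped by how much we know.
      size_t AlignmentLog2() const {
        size_t i = 0;
        while (i < modulus_log_2_ && (value_ & (size_t{1} << i)) == 0) ++i;
        return i;
      }

     private:
      ResidueClass(size_t value, size_t modulus_log_2)
          : value_(modulus_log_2 < kMaxModulusLog2
                       ? value & ((size_t{1} << modulus_log_2) - 1)
                       : value),
            modulus_log_2_(modulus_log_2) {}

      size_t value_;
      size_t modulus_log_2_;
    };

    int main() {
      // A 16-byte header followed by an array of 8-byte elements of unknown
      // length: the total is unknown, but still provably 8-byte aligned.
      ResidueClass offset = ResidueClass(16) + ResidueClass::KnownModulo(0, 3);
      std::cout << offset.AlignmentLog2() << "\n";            // 3
      std::cout << offset.SingleValue().has_value() << "\n";  // 0
    }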
@@ -88,6 +88,15 @@ constexpr bool all(Args... rest) {
  return fold(std::logical_and<>{}, true, rest...);
}

template <class... Ts>
struct make_void {
  using type = void;
};
// Corresponds to C++17's std::void_t.
// Used for SFINAE based on type errors.
template <class... Ts>
using void_t = typename make_void<Ts...>::type;

}  // namespace base
}  // namespace v8
@@ -1531,3 +1531,17 @@ macro ReplaceTheHoleWithUndefined(o: JSAny|TheHole): JSAny {
}

extern macro DecodeScopeInfoHasContextExtension(intptr): intptr;

struct ConstantIterator<T: type> {
  macro Empty(): bool {
    return false;
  }
  macro Next(): T labels _NoMore {
    return this.value;
  }

  value: T;
}
macro ConstantIterator<T: type>(value: T): ConstantIterator<T> {
  return ConstantIterator{value};
}
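For readers more at home in C++, the Torque ConstantIterator above behaves roughly like this sketch (hypothetical names, for illustration only):

    #include <cstdint>
    #include <iostream>

    // Rough C++ analogue of Torque's ConstantIterator: an "iterator" that
    // never runs out and always yields the same value. Spreading it into an
    // indexed field (`...ConstantIterator(x)`) fills the whole array with x.
    template <typename T>
    struct ConstantIterator {
      T value;
      bool Empty() const { return false; }  // Never exhausted.
      T Next() { return value; }            // Always the same element.
    };

    int main() {
      ConstantIterator<uint8_t> it{0xFF};
      uint8_t hash_table[4];
      for (uint8_t& slot : hash_table) slot = it.Next();  // 0xFF, 0xFF, ...
      std::cout << static_cast<int>(hash_table[0]) << "\n";  // 255
    }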
@@ -47,6 +47,11 @@ FromConstexpr<Number, constexpr float64>(f: constexpr float64): Number {
FromConstexpr<Number, constexpr int31>(i: constexpr int31): Number {
  return %FromConstexpr<Number>(i);
}
FromConstexpr<uint8, constexpr int31>(i: constexpr int31): uint8 {
  const i: uint32 = i;
  StaticAssert(i <= 255);
  return %RawDownCast<uint8>(i);
}
FromConstexpr<Number, constexpr Smi>(s: constexpr Smi): Number {
  return SmiConstant(s);
}
@@ -121,6 +126,9 @@ Convert<intptr, uint16>(ui: uint16): intptr {
Convert<intptr, uint8>(ui: uint8): intptr {
  return Signed(ChangeUint32ToWord(ui));
}
Convert<uint8, intptr>(i: intptr): uint8 {
  return %RawDownCast<uint8>(Unsigned(TruncateIntPtrToInt32(i)) & 0xFF);
}
Convert<int32, uint8>(i: uint8): int32 {
  return Signed(Convert<uint32>(i));
}
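Note that the runtime Convert<uint8, intptr> masks with 0xFF rather than range-checking (only the constexpr FromConstexpr path asserts the range). A quick C++ illustration of that truncation semantics:

    #include <cstdint>
    #include <iostream>

    int main() {
      // Same effect as %RawDownCast<uint8>(... & 0xFF): keep only the low
      // byte, so out-of-range values wrap instead of trapping.
      intptr_t i = 0x1234;
      uint8_t b = static_cast<uint8_t>(static_cast<uint32_t>(i) & 0xFF);
      std::cout << static_cast<int>(b) << "\n";  // 0x34 == 52
    }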
@@ -3545,100 +3545,6 @@ CodeStubAssembler::AllocateOrderedHashTable<OrderedHashMap>();
template TNode<OrderedHashSet>
CodeStubAssembler::AllocateOrderedHashTable<OrderedHashSet>();

template <typename CollectionType>
TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
    TNode<IntPtrT> capacity) {
  CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
  CSA_ASSERT(this, IntPtrLessThan(
      capacity, IntPtrConstant(CollectionType::kMaxCapacity)));

  TNode<IntPtrT> data_table_start_offset =
      IntPtrConstant(CollectionType::DataTableStartOffset());

  TNode<IntPtrT> data_table_size = IntPtrMul(
      capacity, IntPtrConstant(CollectionType::kEntrySize * kTaggedSize));

  TNode<Int32T> hash_table_size =
      Int32Div(TruncateIntPtrToInt32(capacity),
               Int32Constant(CollectionType::kLoadFactor));

  TNode<IntPtrT> hash_table_start_offset =
      IntPtrAdd(data_table_start_offset, data_table_size);

  TNode<IntPtrT> hash_table_and_chain_table_size =
      IntPtrAdd(ChangeInt32ToIntPtr(hash_table_size), capacity);

  TNode<IntPtrT> total_size =
      IntPtrAdd(hash_table_start_offset, hash_table_and_chain_table_size);

  TNode<IntPtrT> total_size_word_aligned =
      IntPtrAdd(total_size, IntPtrConstant(kTaggedSize - 1));
  total_size_word_aligned = ChangeInt32ToIntPtr(
      Int32Div(TruncateIntPtrToInt32(total_size_word_aligned),
               Int32Constant(kTaggedSize)));
  total_size_word_aligned =
      UncheckedCast<IntPtrT>(TimesTaggedSize(total_size_word_aligned));

  // Allocate the table and add the proper map.
  TNode<Map> small_ordered_hash_map =
      CAST(LoadRoot(CollectionType::GetMapRootIndex()));
  TNode<HeapObject> table_obj = AllocateInNewSpace(total_size_word_aligned);
  StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
  TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);

  {
    // This store overlaps with the header fields stored below.
    // Since it happens first, it effectively still just zero-initializes the
    // padding.
    constexpr int offset =
        RoundDown<kTaggedSize>(CollectionType::PaddingOffset());
    STATIC_ASSERT(offset + kTaggedSize == CollectionType::PaddingOffset() +
                                              CollectionType::PaddingSize());
    StoreObjectFieldNoWriteBarrier(table, offset, SmiConstant(0));
  }

  // Initialize the SmallOrderedHashTable fields.
  StoreObjectByteNoWriteBarrier(
      table, CollectionType::NumberOfBucketsOffset(),
      Word32And(Int32Constant(0xFF), hash_table_size));
  StoreObjectByteNoWriteBarrier(table, CollectionType::NumberOfElementsOffset(),
                                Int32Constant(0));
  StoreObjectByteNoWriteBarrier(
      table, CollectionType::NumberOfDeletedElementsOffset(), Int32Constant(0));

  TNode<IntPtrT> table_address =
      IntPtrSub(BitcastTaggedToWord(table), IntPtrConstant(kHeapObjectTag));
  TNode<IntPtrT> hash_table_start_address =
      IntPtrAdd(table_address, hash_table_start_offset);

  // Initialize the HashTable part.
  TNode<ExternalReference> memset =
      ExternalConstant(ExternalReference::libc_memset_function());
  CallCFunction(
      memset, MachineType::AnyTagged(),
      std::make_pair(MachineType::Pointer(), hash_table_start_address),
      std::make_pair(MachineType::IntPtr(), IntPtrConstant(0xFF)),
      std::make_pair(MachineType::UintPtr(), hash_table_and_chain_table_size));

  // Initialize the DataTable part.
  TNode<Oddball> filler = TheHoleConstant();
  TNode<IntPtrT> data_table_start_address =
      IntPtrAdd(table_address, data_table_start_offset);
  TNode<IntPtrT> data_table_end_address =
      IntPtrAdd(data_table_start_address, data_table_size);
  StoreFieldsNoWriteBarrier(data_table_start_address, data_table_end_address,
                            filler);

  return table;
}

template V8_EXPORT_PRIVATE TNode<SmallOrderedHashMap>
CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashMap>(
    TNode<IntPtrT> capacity);
template V8_EXPORT_PRIVATE TNode<SmallOrderedHashSet>
CodeStubAssembler::AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
    TNode<IntPtrT> capacity);

TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
    TNode<Map> map, base::Optional<TNode<HeapObject>> properties,
    base::Optional<TNode<FixedArray>> elements, AllocationFlags flags,
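The removed function computes the table layout by explicit pointer arithmetic. As a sanity check of that arithmetic, the same computation in standalone C++ with illustrative constants (the concrete numbers are assumptions, not V8's):

    #include <cstddef>
    #include <iostream>

    int main() {
      // Mirror of the removed CSA size computation: a SmallOrderedHashMap on
      // an assumed 64-bit layout (kTaggedSize = 8, kEntrySize = 2 slots,
      // kLoadFactor = 2, 16-byte header before the data table).
      const size_t kTaggedSize = 8;
      const size_t kEntrySize = 2;   // key + value, in tagged slots
      const size_t kLoadFactor = 2;  // entries per hash bucket
      const size_t kDataTableStartOffset = 16;
      const size_t capacity = 4;

      size_t data_table_size = capacity * kEntrySize * kTaggedSize;
      size_t hash_table_size = capacity / kLoadFactor;  // one byte per bucket
      size_t hash_table_start = kDataTableStartOffset + data_table_size;
      // Hash table and chain table are one byte per bucket resp. per entry.
      size_t total_size = hash_table_start + hash_table_size + capacity;
      // Round up to a multiple of kTaggedSize, as the removed CSA code did.
      size_t total_aligned =
          (total_size + kTaggedSize - 1) / kTaggedSize * kTaggedSize;
      std::cout << total_aligned << "\n";  // 88
    }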
@@ -133,6 +133,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
  V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)  \
  V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map,               \
    SloppyArgumentsElementsMap)                                              \
  V(SmallOrderedHashSetMap, small_ordered_hash_set_map,                      \
    SmallOrderedHashSetMap)                                                  \
  V(SmallOrderedHashMapMap, small_ordered_hash_map_map,                      \
    SmallOrderedHashMapMap)                                                  \
  V(SmallOrderedNameDictionaryMap, small_ordered_name_dictionary_map,        \
    SmallOrderedNameDictionaryMap)                                           \
  V(species_symbol, species_symbol, SpeciesSymbol)                           \
  V(StaleRegister, stale_register, StaleRegister)                            \
  V(StoreHandler0Map, store_handler0_map, StoreHandler0Map)                  \
@@ -1792,9 +1798,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  template <typename CollectionType>
  TNode<CollectionType> AllocateOrderedHashTable();

  template <typename CollectionType>
  TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);

  TNode<JSObject> AllocateJSObjectFromMap(
      TNode<Map> map,
      base::Optional<TNode<HeapObject>> properties = base::nullopt,
@@ -150,6 +150,12 @@ template <class Type, class Enable = void>
struct MachineRepresentationOf {
  static const MachineRepresentation value = Type::kMachineRepresentation;
};
// If T defines kMachineType, then we take the machine representation from
// there.
template <class T>
struct MachineRepresentationOf<T, base::void_t<decltype(T::kMachineType)>> {
  static const MachineRepresentation value = T::kMachineType.representation();
};
template <class T>
struct MachineRepresentationOf<
    T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> {
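The new MachineRepresentationOf specialization is the classic void_t detection idiom, built on the base::void_t added in the first hunk. A self-contained sketch of the same pattern (the member and types here are invented for illustration):

    #include <iostream>
    #include <type_traits>

    // C++14-compatible void_t, as defined in the first hunk of this commit.
    template <class... Ts>
    struct make_void { using type = void; };
    template <class... Ts>
    using void_t = typename make_void<Ts...>::type;

    // Primary template: used when T has no kMachineType member.
    template <class T, class Enable = void>
    struct HasMachineType : std::false_type {};

    // Partial specialization: selected by SFINAE only if T::kMachineType is
    // a valid expression, exactly like MachineRepresentationOf above.
    template <class T>
    struct HasMachineType<T, void_t<decltype(T::kMachineType)>>
        : std::true_type {};

    struct WithIt { static constexpr int kMachineType = 1; };
    struct WithoutIt {};

    int main() {
      std::cout << HasMachineType<WithIt>::value << "\n";     // 1
      std::cout << HasMachineType<WithoutIt>::value << "\n";  // 0
    }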
@@ -1891,6 +1891,7 @@ struct CsaEarlyOptimizationPhase {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               &data->info()->tick_counter(),
                               data->jsgraph()->Dead());
    MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
@@ -1901,6 +1902,7 @@ struct CsaEarlyOptimizationPhase {
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                        temp_zone);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
@@ -230,16 +230,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
      break;
    case FILLER_TYPE:
      break;
    case SMALL_ORDERED_HASH_SET_TYPE:
      SmallOrderedHashSet::cast(*this).SmallOrderedHashSetVerify(isolate);
      break;
    case SMALL_ORDERED_HASH_MAP_TYPE:
      SmallOrderedHashMap::cast(*this).SmallOrderedHashMapVerify(isolate);
      break;
    case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
      SmallOrderedNameDictionary::cast(*this).SmallOrderedNameDictionaryVerify(
          isolate);
      break;
    case CODE_DATA_CONTAINER_TYPE:
      CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
      break;
@@ -244,9 +244,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {  // NOLINT
    case THIN_ONE_BYTE_STRING_TYPE:
    case UNCACHED_EXTERNAL_STRING_TYPE:
    case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
    case SMALL_ORDERED_HASH_MAP_TYPE:
    case SMALL_ORDERED_HASH_SET_TYPE:
    case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
      // TODO(all): Handle these types too.
      os << "UNKNOWN TYPE " << map().instance_type();
      UNREACHABLE();
@@ -1339,6 +1336,22 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
  }
}

void SmallOrderedHashSet::SmallOrderedHashSetPrint(std::ostream& os) {
  PrintHeader(os, "SmallOrderedHashSet");
  // TODO(tebbi): Print all fields.
}

void SmallOrderedHashMap::SmallOrderedHashMapPrint(std::ostream& os) {
  PrintHeader(os, "SmallOrderedHashMap");
  // TODO(tebbi): Print all fields.
}

void SmallOrderedNameDictionary::SmallOrderedNameDictionaryPrint(
    std::ostream& os) {
  PrintHeader(os, "SmallOrderedNameDictionary");
  // TODO(tebbi): Print all fields.
}

void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {  // NOLINT
  PrintHeader(os, "SharedFunctionInfo");
  os << "\n - name: ";
@@ -2,9 +2,112 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include 'src/objects/ordered-hash-table.h'

// Using int as a dummy type-parameter to get access to these constants which
// don't actually depend on the derived class. This avoids accidentally
// depending on something from a concrete derived class.
const kSmallOrderedHashTableMaxCapacity: constexpr int31
    generates 'SmallOrderedHashTable<int>::kMaxCapacity';
const kSmallOrderedHashTableNotFound: constexpr int31
    generates 'SmallOrderedHashTable<int>::kNotFound';
const kSmallOrderedHashTableLoadFactor: constexpr int31
    generates 'SmallOrderedHashTable<int>::kLoadFactor';

@noVerifier
@abstract
extern class SmallOrderedHashTable extends HeapObject
    generates 'TNode<HeapObject>';
extern class SmallOrderedHashMap extends SmallOrderedHashTable;
extern class SmallOrderedHashSet extends SmallOrderedHashTable;
extern class SmallOrderedNameDictionary extends SmallOrderedHashTable;
    generates 'TNode<HeapObject>' {
}

extern macro SmallOrderedHashSetMapConstant(): Map;
const kSmallOrderedHashSetMap: Map = SmallOrderedHashSetMapConstant();

@noVerifier
extern class SmallOrderedHashSet extends SmallOrderedHashTable {
  number_of_elements: uint8;
  number_of_deleted_elements: uint8;
  number_of_buckets: uint8;
  @if(TAGGED_SIZE_8_BYTES) padding[5]: uint8;
  @ifnot(TAGGED_SIZE_8_BYTES) padding[1]: uint8;
  data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
      JSAny|TheHole;
  hash_table[number_of_buckets]: uint8;
  chain_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
      uint8;
}

@export
macro AllocateSmallOrderedHashSet(capacity: intptr): SmallOrderedHashSet {
  const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
  assert(
      0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
  return new SmallOrderedHashSet{
    map: kSmallOrderedHashSetMap,
    number_of_elements: 0,
    number_of_deleted_elements: 0,
    number_of_buckets: (Convert<uint8>(hashTableSize)),
    padding: ...ConstantIterator<uint8>(0),
    data_table: ...ConstantIterator(TheHole),
    hash_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound),
    chain_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound)
  };
}

struct HashMapEntry {
  key: JSAny|TheHole;
  value: JSAny|TheHole;
}

extern macro SmallOrderedHashMapMapConstant(): Map;
const kSmallOrderedHashMapMap: Map = SmallOrderedHashMapMapConstant();

@noVerifier
extern class SmallOrderedHashMap extends SmallOrderedHashTable {
  number_of_elements: uint8;
  number_of_deleted_elements: uint8;
  number_of_buckets: uint8;
  @if(TAGGED_SIZE_8_BYTES) padding[5]: uint8;
  @ifnot(TAGGED_SIZE_8_BYTES) padding[1]: uint8;
  data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
      HashMapEntry;
  hash_table[number_of_buckets]: uint8;
  chain_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
      uint8;
}

@export
macro AllocateSmallOrderedHashMap(capacity: intptr): SmallOrderedHashMap {
  const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
  assert(
      0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
  return new SmallOrderedHashMap{
    map: kSmallOrderedHashMapMap,
    number_of_elements: 0,
    number_of_deleted_elements: 0,
    number_of_buckets: (Convert<uint8>(hashTableSize)),
    padding: ...ConstantIterator<uint8>(0),
    data_table: ...ConstantIterator(HashMapEntry{key: TheHole, value: TheHole}),
    hash_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound),
    chain_table: ...ConstantIterator<uint8>(kSmallOrderedHashTableNotFound)
  };
}

struct NameDictionaryEntry {
  key: JSAny|TheHole;
  value: JSAny|TheHole;
  property_details: Smi|TheHole;
}

@noVerifier
extern class SmallOrderedNameDictionary extends SmallOrderedHashTable {
  hash: int32;
  number_of_elements: uint8;
  number_of_deleted_elements: uint8;
  number_of_buckets: uint8;
  padding: uint8;
  data_table[Convert<intptr>(number_of_buckets) * kSmallOrderedHashTableLoadFactor]:
      NameDictionaryEntry;
  hash_table[number_of_buckets]: uint8;
  chain_table[number_of_buckets]: uint8;
}
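The three trailing arrays are the point of this commit: only data_table starts at a statically known offset, while hash_table and chain_table start at offsets that depend on number_of_buckets. A standalone C++ sketch of the offsets implied by the SmallOrderedHashSet definition above (the header size and constants are assumptions for illustration):

    #include <cstddef>
    #include <iostream>

    int main() {
      // Assumed 64-bit layout: kTaggedSize = 8, load factor 2, and an
      // 8-byte header for the map pointer.
      const size_t kTaggedSize = 8;
      const size_t kLoadFactor = 2;
      const size_t kHeaderSize = 8;
      size_t number_of_buckets = 2;  // runtime value, stored in a uint8 field

      size_t counts_offset = kHeaderSize;         // three uint8 counters
      size_t padding_offset = counts_offset + 3;  // padding[5] on 64 bit
      size_t data_table_offset = padding_offset + 5;
      size_t capacity = number_of_buckets * kLoadFactor;
      size_t hash_table_offset = data_table_offset + capacity * kTaggedSize;
      size_t chain_table_offset = hash_table_offset + number_of_buckets;
      // Only data_table_offset is a compile-time constant; the last two
      // depend on number_of_buckets, which is why offsets are now
      // base::Optional<size_t> and computed on demand.
      std::cout << hash_table_offset << " " << chain_table_offset << "\n";
      // prints: 48 50
    }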
@@ -426,14 +426,14 @@ struct StringLiteralExpression : Expression {

struct NumberLiteralExpression : Expression {
  DEFINE_AST_NODE_LEAF_BOILERPLATE(NumberLiteralExpression)
  NumberLiteralExpression(SourcePosition pos, std::string name)
      : Expression(kKind, pos), number(std::move(name)) {}
  NumberLiteralExpression(SourcePosition pos, double number)
      : Expression(kKind, pos), number(number) {}

  void VisitAllSubExpressions(VisitCallback callback) override {
    callback(this);
  }

  std::string number;
  double number;
};

struct ElementAccessExpression : LocationExpression {
@@ -118,7 +118,7 @@ void GenerateFieldAddressAccessor(const Field& field,
  h_contents << "  uintptr_t " << address_getter << "() const;\n";
  cc_contents << "\nuintptr_t Tq" << class_name << "::" << address_getter
              << "() const {\n";
  cc_contents << "  return address_ - i::kHeapObjectTag + " << field.offset
  cc_contents << "  return address_ - i::kHeapObjectTag + " << *field.offset
              << ";\n";
  cc_contents << "}\n";
}
@@ -262,7 +262,7 @@ void GenerateGetPropsChunkForField(const Field& field,
            << struct_field_type.GetOriginalType(kAsStoredInHeap)
            << "\", \""
            << struct_field_type.GetOriginalType(kUncompressed)
            << "\", " << struct_field.offset << "));\n";
            << "\", " << *struct_field.offset << "));\n";
  }
  struct_field_list = "std::move(" + struct_field_list + ")";
}
@@ -396,6 +396,10 @@ void GenerateClassDebugReader(const ClassType& type, std::ostream& h_contents,

  for (const Field& field : type.fields()) {
    if (field.name_and_type.type == TypeOracle::GetVoidType()) continue;
    if (!field.offset.has_value()) {
      // Fields with dynamic offset are currently unsupported.
      continue;
    }
    GenerateFieldAddressAccessor(field, name, h_contents, cc_contents);
    GenerateFieldValueAccessor(field, name, h_contents, cc_contents);
    base::Optional<NameAndType> array_length;
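With offsets now base::Optional<size_t>, every consumer has to decide what to do when an offset is statically unknown, as the debug-helper generator does above. A minimal sketch of that pattern (std::optional stands in for base::Optional; names are invented):

    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <string>

    struct Field {
      std::string name;
      // nullopt: the field sits after an indexed (dynamically sized) field.
      std::optional<size_t> offset;
    };

    void GenerateAccessor(const Field& field) {
      if (!field.offset.has_value()) {
        // Same policy as the hunk above: skip fields with dynamic offsets.
        std::cout << "// " << field.name << ": dynamic offset, skipped\n";
        return;
      }
      std::cout << "uintptr_t " << field.name << "_address = base + "
                << *field.offset << ";\n";
    }

    int main() {
      GenerateAccessor({"map", 0});
      GenerateAccessor({"chain_table", std::nullopt});
    }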
@@ -207,10 +207,13 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
      ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
                  *original_type);
    }
    if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
      if (return_type->GetGeneratedTNodeTypeName() !=
          original_type->GetGeneratedTNodeTypeName()) {
    if (return_type->GetGeneratedTNodeTypeName() !=
        original_type->GetGeneratedTNodeTypeName()) {
      if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
        out_ << "TORQUE_CAST";
      } else {
        out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
             << ">";
      }
    }
  } else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
@@ -675,26 +678,6 @@ void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
                 ">(" + stack->Top() + ")");
}

void CSAGenerator::EmitInstruction(
    const CreateFieldReferenceInstruction& instruction,
    Stack<std::string>* stack) {
  base::Optional<const ClassType*> class_type =
      instruction.type->ClassSupertype();
  if (!class_type.has_value()) {
    ReportError("Cannot create field reference of type ", instruction.type,
                " which does not inherit from a class type");
  }
  const Field& field = class_type.value()->LookupField(instruction.field_name);
  std::string offset_name = FreshNodeName();
  stack->Push(offset_name);

  out_ << "    TNode<IntPtrT> " << offset_name << " = ca_.IntPtrConstant(";
  out_ << field.aggregate->name() << "::k"
       << CamelifyString(field.name_and_type.name) << "Offset";
  out_ << ");\n"
       << "    USE(" << stack->Top() << ");\n";
}

void CSAGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::string result_name = FreshNodeName();
@@ -716,7 +699,9 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
  std::string offset = stack->Pop();
  std::string object = stack->Pop();

  out_ << "    CodeStubAssembler(state_).StoreReference(CodeStubAssembler::"
  out_ << "    CodeStubAssembler(state_).StoreReference<"
       << instruction.type->GetGeneratedTNodeTypeName()
       << ">(CodeStubAssembler::"
          "Reference{"
       << object << ", " << offset << "}, " << value << ");\n";
}
@@ -99,12 +99,13 @@ class TargetArchitecture : public ContextualClass<TargetArchitecture> {
 public:
  explicit TargetArchitecture(bool force_32bit);

  static int TaggedSize() { return Get().tagged_size_; }
  static int RawPtrSize() { return Get().raw_ptr_size_; }
  static size_t TaggedSize() { return Get().tagged_size_; }
  static size_t RawPtrSize() { return Get().raw_ptr_size_; }
  static size_t MaxHeapAlignment() { return TaggedSize(); }

 private:
  const int tagged_size_;
  const int raw_ptr_size_;
  const size_t tagged_size_;
  const size_t raw_ptr_size_;
};

}  // namespace torque
@@ -772,18 +772,16 @@ VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
}

VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
  // TODO(tebbi): Do not silently loose precision; support 64bit literals.
  double d = std::stod(expr->number.c_str());
  int32_t i = static_cast<int32_t>(d);
  int32_t i = static_cast<int32_t>(expr->number);
  const Type* result_type = TypeOracle::GetConstFloat64Type();
  if (i == d) {
  if (i == expr->number) {
    if ((i >> 30) == (i >> 31)) {
      result_type = TypeOracle::GetConstInt31Type();
    } else {
      result_type = TypeOracle::GetConstInt32Type();
    }
  }
  return VisitResult{result_type, expr->number};
  return VisitResult{result_type, ToString(expr->number)};
}

VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
|
||||
}
|
||||
|
||||
LocationReference ImplementationVisitor::GenerateFieldReference(
|
||||
VisitResult object, const NameAndType& field, const ClassType* class_type) {
|
||||
GenerateCopy(object);
|
||||
assembler().Emit(CreateFieldReferenceInstruction{class_type, field.name});
|
||||
VisitResult heap_reference(TypeOracle::GetReferenceType(field.type),
|
||||
assembler().TopRange(2));
|
||||
return LocationReference::HeapReference(heap_reference);
|
||||
VisitResult object, const Field& field, const ClassType* class_type) {
|
||||
StackRange result_range = assembler().TopRange(0);
|
||||
result_range.Extend(GenerateCopy(object).stack_range());
|
||||
VisitResult offset;
|
||||
if (field.offset.has_value()) {
|
||||
offset =
|
||||
VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
|
||||
offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
|
||||
} else {
|
||||
StackScope stack_scope(this);
|
||||
for (const Field& f : class_type->ComputeAllFields()) {
|
||||
if (f.offset) {
|
||||
offset =
|
||||
VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
|
||||
}
|
||||
if (f.name_and_type.name == field.name_and_type.name) break;
|
||||
if (f.index) {
|
||||
if (!offset.IsOnStack()) {
|
||||
offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
|
||||
}
|
||||
VisitResult array_length = GenerateArrayLength(object, f);
|
||||
size_t element_size;
|
||||
std::string element_size_string;
|
||||
std::tie(element_size, element_size_string) =
|
||||
*SizeOf(f.name_and_type.type);
|
||||
VisitResult array_element_size =
|
||||
VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
|
||||
// In contrast to the code used for allocation, we don't need overflow
|
||||
// checks here because we already know all the offsets fit into memory.
|
||||
VisitResult array_size =
|
||||
GenerateCall("*", {{array_length, array_element_size}, {}});
|
||||
offset = GenerateCall("+", {{offset, array_size}, {}});
|
||||
}
|
||||
}
|
||||
DCHECK(offset.IsOnStack());
|
||||
offset = stack_scope.Yield(offset);
|
||||
}
|
||||
result_range.Extend(offset.stack_range());
|
||||
if (field.index) {
|
||||
VisitResult length = GenerateArrayLength(object, field);
|
||||
result_range.Extend(length.stack_range());
|
||||
const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
|
||||
return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
|
||||
|
||||
} else {
|
||||
VisitResult heap_reference(
|
||||
TypeOracle::GetReferenceType(field.name_and_type.type), result_range);
|
||||
return LocationReference::HeapReference(heap_reference);
|
||||
}
|
||||
}
|
||||
|
||||
// This is used to generate field references during initialization, where we can
|
||||
// re-use the offsets used for computing the allocation size.
|
||||
LocationReference ImplementationVisitor::GenerateFieldReference(
|
||||
VisitResult object, const Field& field,
|
||||
const LayoutForInitialization& layout) {
|
||||
StackRange result_range = assembler().TopRange(0);
|
||||
result_range.Extend(GenerateCopy(object).stack_range());
|
||||
VisitResult offset = GenerateImplicitConvert(
|
||||
TypeOracle::GetIntPtrType(), layout.offsets.at(field.name_and_type.name));
|
||||
result_range.Extend(offset.stack_range());
|
||||
if (field.index) {
|
||||
VisitResult length =
|
||||
GenerateCopy(layout.array_lengths.at(field.name_and_type.name));
|
||||
result_range.Extend(length.stack_range());
|
||||
const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
|
||||
return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
|
||||
} else {
|
||||
VisitResult heap_reference(
|
||||
TypeOracle::GetReferenceType(field.name_and_type.type), result_range);
|
||||
return LocationReference::HeapReference(heap_reference);
|
||||
}
|
||||
}
|
||||
|
||||
void ImplementationVisitor::InitializeClass(
|
||||
const ClassType* class_type, VisitResult allocate_result,
|
||||
const InitializerResults& initializer_results) {
|
||||
const InitializerResults& initializer_results,
|
||||
const LayoutForInitialization& layout) {
|
||||
if (const ClassType* super = class_type->GetSuperClass()) {
|
||||
InitializeClass(super, allocate_result, initializer_results);
|
||||
InitializeClass(super, allocate_result, initializer_results, layout);
|
||||
}
|
||||
|
||||
for (Field f : class_type->fields()) {
|
||||
VisitResult current_value =
|
||||
VisitResult initializer_value =
|
||||
initializer_results.field_value_map.at(f.name_and_type.name);
|
||||
LocationReference field =
|
||||
GenerateFieldReference(allocate_result, f, layout);
|
||||
if (f.index) {
|
||||
InitializeFieldFromSpread(allocate_result, f, initializer_results,
|
||||
class_type);
|
||||
DCHECK(field.IsHeapSlice());
|
||||
VisitResult slice = field.GetVisitResult();
|
||||
GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
|
||||
"InitializeFieldsFromIterator"),
|
||||
{{slice, initializer_value}, {}});
|
||||
} else {
|
||||
GenerateAssignToLocation(
|
||||
GenerateFieldReference(allocate_result, f.name_and_type, class_type),
|
||||
current_value);
|
||||
GenerateAssignToLocation(field, initializer_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1272,6 +1340,7 @@ VisitResult ImplementationVisitor::GenerateArrayLength(
    Expression* array_length, Namespace* nspace,
    const std::map<std::string, LocationReference>& bindings) {
  StackScope stack_scope(this);
  CurrentSourcePosition::Scope pos_scope(array_length->pos);
  // Switch to the namespace where the class was declared.
  CurrentScope::Scope current_scope_scope(nspace);
  // Reset local bindings and install local binding for the preceding fields.
@@ -1296,9 +1365,8 @@ VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
  std::map<std::string, LocationReference> bindings;
  for (Field f : class_type->ComputeAllFields()) {
    if (f.index) break;
    bindings.insert({f.name_and_type.name,
                     GenerateFieldReference(object, f.name_and_type,
                                            *object.type()->ClassSupertype())});
    bindings.insert(
        {f.name_and_type.name, GenerateFieldReference(object, f, class_type)});
  }
  return stack_scope.Yield(
      GenerateArrayLength(*field.index, class_type->nspace(), bindings));
@@ -1322,19 +1390,17 @@ VisitResult ImplementationVisitor::GenerateArrayLength(
      GenerateArrayLength(*field.index, class_type->nspace(), bindings));
}

VisitResult ImplementationVisitor::GenerateObjectSize(
LayoutForInitialization ImplementationVisitor::GenerateLayoutForInitialization(
    const ClassType* class_type,
    const InitializerResults& initializer_results) {
  StackScope stack_scope(this);
  if (base::Optional<size_t> size = class_type->size()) {
    return VisitResult(TypeOracle::GetConstInt31Type(), ToString(*size));
  }
  size_t header_size = class_type->header_size();
  DCHECK_GT(header_size, 0);
  VisitResult size =
      VisitResult(TypeOracle::GetConstInt31Type(), ToString(header_size));
  bool needs_dynamic_size_alignment = false;
  LayoutForInitialization layout;
  VisitResult offset;
  for (Field f : class_type->ComputeAllFields()) {
    if (f.offset.has_value()) {
      offset =
          VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
    }
    layout.offsets[f.name_and_type.name] = offset;
    if (f.index) {
      size_t element_size;
      std::string element_size_string;
@@ -1342,50 +1408,33 @@ VisitResult ImplementationVisitor::GenerateObjectSize(
          *SizeOf(f.name_and_type.type);
      VisitResult array_element_size =
          VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
      VisitResult array_length =
          GenerateArrayLength(class_type, initializer_results, f);
      layout.array_lengths[f.name_and_type.name] = array_length;
      Arguments arguments;
      arguments.parameters = {
          size, initializer_results.array_lengths.at(f.name_and_type.name),
          array_element_size};
      size = GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                        "AddIndexedFieldSizeToObjectSize"),
                          arguments);
      if (element_size % TargetArchitecture::TaggedSize() != 0) {
        needs_dynamic_size_alignment = true;
      }
      arguments.parameters = {offset, array_length, array_element_size};
      offset = GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                          "AddIndexedFieldSizeToObjectSize"),
                            arguments);
    } else {
      DCHECK(f.offset.has_value());
    }
  }
  if (needs_dynamic_size_alignment) {
  if (class_type->size().SingleValue()) {
    layout.size = VisitResult(TypeOracle::GetConstInt31Type(),
                              ToString(*class_type->size().SingleValue()));
  } else {
    layout.size = offset;
  }
  if ((size_t{1} << class_type->size().AlignmentLog2()) <
      TargetArchitecture::TaggedSize()) {
    Arguments arguments;
    arguments.parameters = {size};
    size = GenerateCall(
    arguments.parameters = {layout.size};
    layout.size = GenerateCall(
        QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AlignTagged"),
        arguments);
  }
  return stack_scope.Yield(size);
}

void ImplementationVisitor::InitializeFieldFromSpread(
    VisitResult object, const Field& field,
    const InitializerResults& initializer_results,
    const ClassType* class_type) {
  StackScope stack_scope(this);

  VisitResult iterator =
      initializer_results.field_value_map.at(field.name_and_type.name);
  VisitResult length =
      initializer_results.array_lengths.at(field.name_and_type.name);

  GenerateCopy(object);
  assembler().Emit(
      CreateFieldReferenceInstruction{class_type, field.name_and_type.name});
  DCHECK_EQ(length.stack_range().Size(), 1);
  GenerateCopy(length);
  const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
  VisitResult slice = VisitResult(slice_type, assembler().TopRange(3));

  GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                             "InitializeFieldsFromIterator"),
               Arguments{{slice, iterator}, {}});
  return layout;
}

VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
@@ -1407,7 +1456,7 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
      VisitInitializerResults(class_type, expr->initializers);

  const Field& map_field = class_type->LookupField("map");
  if (map_field.offset != 0) {
  if (*map_field.offset != 0) {
    ReportError("class initializers must have a map as first parameter");
  }
  const std::map<std::string, VisitResult>& initializer_fields =
@@ -1444,24 +1493,18 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
      class_type->ComputeAllFields(),
      expr->initializers, !class_type->IsExtern());

  for (const Field& f : class_type->ComputeAllFields()) {
    if (f.index) {
      initializer_results.array_lengths[f.name_and_type.name] =
          GenerateArrayLength(class_type, initializer_results, f);
    }
  }

  VisitResult object_size = GenerateObjectSize(class_type, initializer_results);
  LayoutForInitialization layout =
      GenerateLayoutForInitialization(class_type, initializer_results);

  Arguments allocate_arguments;
  allocate_arguments.parameters.push_back(object_size);
  allocate_arguments.parameters.push_back(layout.size);
  allocate_arguments.parameters.push_back(object_map);
  VisitResult allocate_result = GenerateCall(
      QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "Allocate"),
      allocate_arguments, {class_type}, false);
  DCHECK(allocate_result.IsOnStack());

  InitializeClass(class_type, allocate_result, initializer_results);
  InitializeClass(class_type, allocate_result, initializer_results, layout);

  return stack_scope.Yield(GenerateCall(
      "%RawDownCast", Arguments{{allocate_result}, {}}, {class_type}));
@@ -1903,14 +1946,19 @@ LocationReference ImplementationVisitor::GetLocationReference(

LocationReference ImplementationVisitor::GetLocationReference(
    FieldAccessExpression* expr) {
  const std::string& fieldname = expr->field->value;
  LocationReference reference = GetLocationReference(expr->object);
  return GenerateFieldAccess(GetLocationReference(expr->object),
                             expr->field->value, expr->field->pos);
}

LocationReference ImplementationVisitor::GenerateFieldAccess(
    LocationReference reference, const std::string& fieldname,
    base::Optional<SourcePosition> pos) {
  if (reference.IsVariableAccess() &&
      reference.variable().type()->IsStructType()) {
    const StructType* type = StructType::cast(reference.variable().type());
    const Field& field = type->LookupField(fieldname);
    if (GlobalContext::collect_language_server_data()) {
      LanguageServerData::AddDefinition(expr->field->pos, field.pos);
    if (GlobalContext::collect_language_server_data() && pos.has_value()) {
      LanguageServerData::AddDefinition(*pos, field.pos);
    }
    if (field.const_qualified) {
      VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
@@ -1922,10 +1970,10 @@ LocationReference ImplementationVisitor::GetLocationReference(
    }
  }
  if (reference.IsTemporary() && reference.temporary().type()->IsStructType()) {
    if (GlobalContext::collect_language_server_data()) {
    if (GlobalContext::collect_language_server_data() && pos.has_value()) {
      const StructType* type = StructType::cast(reference.temporary().type());
      const Field& field = type->LookupField(fieldname);
      LanguageServerData::AddDefinition(expr->field->pos, field.pos);
      LanguageServerData::AddDefinition(*pos, field.pos);
    }
    return LocationReference::Temporary(
        ProjectStructField(reference.temporary(), fieldname),
@@ -1949,11 +1997,14 @@ LocationReference ImplementationVisitor::GetLocationReference(
  }
  if (const StructType* struct_type =
          StructType::DynamicCast(*generic_type)) {
    const Field& field = struct_type->LookupField(expr->field->value);
    const Field& field = struct_type->LookupField(fieldname);
    // Update the Reference's type to refer to the field type within the
    // struct.
    ref.SetType(TypeOracle::GetReferenceType(field.name_and_type.type));
    if (field.offset != 0) {
    if (!field.offset.has_value()) {
      Error("accessing field with unknown offset").Throw();
    }
    if (*field.offset != 0) {
      // Copy the Reference struct up the stack and update the new copy's
      // |offset| value to point to the struct field.
      StackScope scope(this);
@@ -1961,7 +2012,7 @@ LocationReference ImplementationVisitor::GetLocationReference(
      VisitResult ref_offset = ProjectStructField(ref, "offset");
      VisitResult struct_offset{
          TypeOracle::GetIntPtrType()->ConstexprVersion(),
          std::to_string(field.offset)};
          std::to_string(*field.offset)};
      VisitResult updated_offset =
          GenerateCall("+", Arguments{{ref_offset, struct_offset}, {}});
      assembler().Poke(ref_offset.stack_range(), updated_offset.stack_range(),
@@ -1980,28 +2031,10 @@ LocationReference ImplementationVisitor::GetLocationReference(
        QualifiedName{"." + fieldname}, {object_result.type()});
    if ((*class_type)->HasField(fieldname) && !has_explicit_overloads) {
      const Field& field = (*class_type)->LookupField(fieldname);
      if (GlobalContext::collect_language_server_data()) {
        LanguageServerData::AddDefinition(expr->field->pos, field.pos);
      }
      if (field.index) {
        assembler().Emit(
            CreateFieldReferenceInstruction{*class_type, fieldname});
        StackRange slice_range = assembler().TopRange(2);
        // Fetch the length from the object
        VisitResult length = GenerateArrayLength(object_result, field);
        slice_range.Extend(length.stack_range());
        const Type* slice_type =
            TypeOracle::GetSliceType(field.name_and_type.type);
        return LocationReference::HeapSlice(
            VisitResult(slice_type, slice_range));
      } else {
        assembler().Emit(
            CreateFieldReferenceInstruction{*class_type, fieldname});
        const Type* reference_type =
            TypeOracle::GetReferenceType(field.name_and_type.type);
        return LocationReference::HeapReference(
            VisitResult(reference_type, assembler().TopRange(2)));
      if (GlobalContext::collect_language_server_data() && pos.has_value()) {
        LanguageServerData::AddDefinition(*pos, field.pos);
      }
      return GenerateFieldReference(object_result, field, *class_type);
    }
  }
  return LocationReference::FieldAccess(object_result, fieldname);
@@ -2161,19 +2194,32 @@ void ImplementationVisitor::GenerateAssignToLocation(
    ReportError("assigning a value directly to an indexed field isn't allowed");
  } else if (reference.IsHeapReference()) {
    const Type* referenced_type = reference.ReferencedType();
    GenerateCopy(reference.heap_reference());
    VisitResult converted_assignment_value =
        GenerateImplicitConvert(referenced_type, assignment_value);
    if (referenced_type == TypeOracle::GetFloat64Type()) {
      VisitResult silenced_float_value =
          GenerateCall("Float64SilenceNaN", Arguments{{assignment_value}, {}});
      assembler().Poke(converted_assignment_value.stack_range(),
                       silenced_float_value.stack_range(), referenced_type);
    if (auto* struct_type = StructType::DynamicCast(referenced_type)) {
      if (assignment_value.type() != referenced_type) {
        ReportError("Cannot assign to ", *referenced_type,
                    " with value of type ", *assignment_value.type());
      }
      for (const Field& field : struct_type->fields()) {
        const std::string& fieldname = field.name_and_type.name;
        GenerateAssignToLocation(
            GenerateFieldAccess(reference, fieldname),
            ProjectStructField(assignment_value, fieldname));
      }
    } else {
      GenerateCopy(reference.heap_reference());
      VisitResult converted_assignment_value =
          GenerateImplicitConvert(referenced_type, assignment_value);
      if (referenced_type == TypeOracle::GetFloat64Type()) {
        VisitResult silenced_float_value = GenerateCall(
            "Float64SilenceNaN", Arguments{{assignment_value}, {}});
        assembler().Poke(converted_assignment_value.stack_range(),
                         silenced_float_value.stack_range(), referenced_type);
      }
      assembler().Emit(StoreReferenceInstruction{referenced_type});
    }
    assembler().Emit(StoreReferenceInstruction{referenced_type});
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then set the updated bits, then store it
    // back to where we found it.
    // First fetch the bitfield struct, then set the updated bits, then store
    // it back to where we found it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    VisitResult converted_value =
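The new branch above implements the "assignments to references to structs" bullet of the commit message: a struct store is decomposed into per-field assignments via GenerateFieldAccess, recursing until only scalar stores remain. A rough C++ analogue of that lowering (types invented for illustration):

    #include <iostream>

    // Invented types for illustration.
    struct Inner { int a; int b; };
    struct Outer { Inner inner; int c; };

    // Mirrors the new GenerateAssignToLocation branch: a struct store becomes
    // one store per field, recursing into nested struct fields, so only
    // scalar stores (Torque's StoreReferenceInstruction) remain at the leaves.
    void Store(int& dest, int src) { dest = src; }  // leaf: a single store
    void Store(Inner& dest, const Inner& src) {
      Store(dest.a, src.a);
      Store(dest.b, src.b);
    }
    void Store(Outer& dest, const Outer& src) {
      Store(dest.inner, src.inner);  // recurse into the embedded struct
      Store(dest.c, src.c);
    }

    int main() {
      Outer slot{};
      Store(slot, Outer{{1, 2}, 3});
      std::cout << slot.inner.a << slot.inner.b << slot.c << "\n";  // 123
    }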
@@ -3747,7 +3793,7 @@ void GenerateClassFieldVerifier(const std::string& class_name,

  if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
    for (const Field& field : struct_type->fields()) {
      GenerateFieldValueVerifier(class_name, f, field, field.offset,
      GenerateFieldValueVerifier(class_name, f, field, *field.offset,
                                 std::to_string(struct_type->PackedSize()),
                                 cc_contents);
    }
@@ -24,6 +24,7 @@ namespace torque {
template <typename T>
class Binding;
struct LocalValue;
class ImplementationVisitor;

// LocationReference is the representation of an l-value, so a value that might
// allow for assignment. For uniformity, this class can also represent
@@ -201,7 +202,12 @@ class LocationReference {
struct InitializerResults {
  std::vector<Identifier*> names;
  std::map<std::string, VisitResult> field_value_map;
};

struct LayoutForInitialization {
  std::map<std::string, VisitResult> array_lengths;
  std::map<std::string, VisitResult> offsets;
  VisitResult size;
};

template <class T>
@@ -402,8 +408,11 @@ class ImplementationVisitor {
      const ClassType* class_type,
      const std::vector<NameAndExpression>& expressions);
  LocationReference GenerateFieldReference(VisitResult object,
                                           const NameAndType& field,
                                           const Field& field,
                                           const ClassType* class_type);
  LocationReference GenerateFieldReference(
      VisitResult object, const Field& field,
      const LayoutForInitialization& layout);
  VisitResult GenerateArrayLength(
      Expression* array_length, Namespace* nspace,
      const std::map<std::string, LocationReference>& bindings);
@@ -411,15 +420,13 @@ class ImplementationVisitor {
  VisitResult GenerateArrayLength(const ClassType* class_type,
                                  const InitializerResults& initializer_results,
                                  const Field& field);
  VisitResult GenerateObjectSize(const ClassType* class_type,
                                 const InitializerResults& initializer_results);

  void InitializeFieldFromSpread(VisitResult object, const Field& field,
                                 const InitializerResults& initializer_results,
                                 const ClassType* class_type);
  LayoutForInitialization GenerateLayoutForInitialization(
      const ClassType* class_type,
      const InitializerResults& initializer_results);

  void InitializeClass(const ClassType* class_type, VisitResult allocate_result,
                       const InitializerResults& initializer_results);
                       const InitializerResults& initializer_results,
                       const LayoutForInitialization& layout);

  VisitResult Visit(StructExpression* decl);

@@ -427,6 +434,9 @@ class ImplementationVisitor {
  LocationReference GetLocationReference(IdentifierExpression* expr);
  LocationReference GetLocationReference(DereferenceExpression* expr);
  LocationReference GetLocationReference(FieldAccessExpression* expr);
  LocationReference GenerateFieldAccess(
      LocationReference reference, const std::string& fieldname,
      base::Optional<SourcePosition> pos = {});
  LocationReference GetLocationReference(ElementAccessExpression* expr);

  VisitResult GenerateFetchFromLocation(const LocationReference& reference);
@@ -290,14 +290,6 @@ void UnsafeCastInstruction::TypeInstruction(Stack<const Type*>* stack,
  stack->Poke(stack->AboveTop() - 1, destination_type);
}

void CreateFieldReferenceInstruction::TypeInstruction(
    Stack<const Type*>* stack, ControlFlowGraph* cfg) const {
  if (stack->Top() != TypeOracle::GetUninitializedHeapObjectType()) {
    ExpectSubtype(stack->Top(), type);
  }
  stack->Push(TypeOracle::GetIntPtrType());
}

void LoadReferenceInstruction::TypeInstruction(Stack<const Type*>* stack,
                                               ControlFlowGraph* cfg) const {
  ExpectType(TypeOracle::GetIntPtrType(), stack->Pop());
@@ -30,7 +30,6 @@ class RuntimeFunction;
  V(DeleteRangeInstruction)            \
  V(PushUninitializedInstruction)      \
  V(PushBuiltinPointerInstruction)     \
  V(CreateFieldReferenceInstruction)   \
  V(LoadReferenceInstruction)          \
  V(StoreReferenceInstruction)         \
  V(LoadBitFieldInstruction)           \
@@ -206,17 +205,6 @@ struct NamespaceConstantInstruction : InstructionBase {
  NamespaceConstant* constant;
};

struct CreateFieldReferenceInstruction : InstructionBase {
  TORQUE_INSTRUCTION_BOILERPLATE()
  CreateFieldReferenceInstruction(const ClassType* type, std::string field_name)
      : type(type), field_name(std::move(field_name)) {
    // Trigger errors early.
    this->type->LookupField(this->field_name);
  }
  const ClassType* type;
  std::string field_name;
};

struct LoadReferenceInstruction : InstructionBase {
  TORQUE_INSTRUCTION_BOILERPLATE()
  explicit LoadReferenceInstruction(const Type* type) : type(type) {}
@@ -5,6 +5,7 @@
#include <algorithm>
#include <cctype>
#include <set>
#include <stdexcept>
#include <unordered_map>

#include "src/common/globals.h"
@@ -1618,7 +1619,15 @@ base::Optional<ParseResult> MakeAssignmentExpression(
base::Optional<ParseResult> MakeNumberLiteralExpression(
    ParseResultIterator* child_results) {
  auto number = child_results->NextAs<std::string>();
  Expression* result = MakeNode<NumberLiteralExpression>(std::move(number));
  // TODO(tebbi): Support 64bit literals.
  // Meanwhile, we type it as constexpr float64 when out of int32 range.
  double value = 0;
  try {
    value = std::stod(number);
  } catch (const std::out_of_range&) {
    Error("double literal out-of-range").Throw();
  }
  Expression* result = MakeNode<NumberLiteralExpression>(value);
  return ParseResult{result};
}
@@ -204,8 +204,7 @@ const StructType* TypeVisitor::ComputeType(
  CurrentScope::Scope struct_namespace_scope(struct_type->nspace());
  CurrentSourcePosition::Scope position_activator(decl->pos);

  size_t offset = 0;
  bool packable = true;
  ResidueClass offset = 0;
  for (auto& field : decl->fields) {
    CurrentSourcePosition::Scope position_activator(
        field.name_and_type.type->pos);
@@ -218,20 +217,11 @@ const StructType* TypeVisitor::ComputeType(
                   struct_type,
                   base::nullopt,
                   {field.name_and_type.name->value, field_type},
                   offset,
                   offset.SingleValue(),
                   false,
                   field.const_qualified,
                   false};
    auto optional_size = SizeOf(f.name_and_type.type);
    // Structs may contain fields that aren't representable in packed form. If
    // so, then this field and any subsequent fields should have their offsets
    // marked as invalid.
    if (!optional_size.has_value()) {
      packable = false;
    }
    if (!packable) {
      f.offset = Field::kInvalidOffset;
    }
    struct_type->RegisterField(f);
    // Offsets are assigned based on an assumption of no space between members.
    // This might lead to invalid alignment in some cases, but most structs are
@@ -243,6 +233,10 @@ const StructType* TypeVisitor::ComputeType(
      size_t field_size = 0;
      std::tie(field_size, std::ignore) = *optional_size;
      offset += field_size;
    } else {
      // Structs may contain fields that aren't representable in packed form.
      // If so, the offsets of subsequent fields are marked as invalid.
      offset = ResidueClass::Unknown();
    }
  }
  return struct_type;
@@ -389,11 +383,13 @@ Signature TypeVisitor::MakeSignature(const CallableDeclaration* declaration) {
void TypeVisitor::VisitClassFieldsAndMethods(
    ClassType* class_type, const ClassDeclaration* class_declaration) {
  const ClassType* super_class = class_type->GetSuperClass();
  size_t class_offset = super_class ? super_class->header_size() : 0;
  size_t header_size = class_offset;
  DCHECK_IMPLIES(super_class && !super_class->size(),
                 class_declaration->fields.empty());
  bool seen_indexed_field = false;
  ResidueClass class_offset = 0;
  size_t header_size = 0;
  if (super_class) {
    class_offset = super_class->size();
    header_size = super_class->header_size();
  }

  for (const ClassFieldExpression& field_expression :
       class_declaration->fields) {
    CurrentSourcePosition::Scope position_activator(
@@ -436,55 +432,42 @@ void TypeVisitor::VisitClassFieldsAndMethods(
      }
    }
    base::Optional<Expression*> array_length;
    if (field_expression.index) {
      if (seen_indexed_field ||
          (super_class && super_class->HasIndexedField())) {
        ReportError(
            "only one indexable field is currently supported per class");
      }
      seen_indexed_field = true;
      array_length = *field_expression.index;
    } else {
      if (seen_indexed_field) {
        ReportError("cannot declare non-indexable field \"",
                    field_expression.name_and_type.name,
                    "\" after an indexable field "
                    "declaration");
      }
    }
    base::Optional<Expression*> array_length = field_expression.index;
    const Field& field = class_type->RegisterField(
        {field_expression.name_and_type.name->pos,
         class_type,
         array_length,
         {field_expression.name_and_type.name->value, field_type},
         class_offset,
         class_offset.SingleValue(),
         field_expression.weak,
         field_expression.const_qualified,
         field_expression.generate_verify});
    size_t field_size;
    std::tie(field_size, std::ignore) = field.GetFieldSizeInformation();
    // Our allocations don't support alignments beyond kTaggedSize.
    size_t alignment = std::min(
        static_cast<size_t>(TargetArchitecture::TaggedSize()), field_size);
    if (alignment > 0 && class_offset % alignment != 0) {
      ReportError("field ", field_expression.name_and_type.name, " at offset ",
                  class_offset, " is not ", alignment, "-byte aligned.");
    }
    if (!field_expression.index) {
      class_offset += field_size;
      // In-object properties are not considered part of the header.
      if (!class_type->IsShape()) {
        header_size = class_offset;
    ResidueClass field_size = std::get<0>(field.GetFieldSizeInformation());
    if (field.index) {
      if (auto literal = NumberLiteralExpression::DynamicCast(*field.index)) {
        size_t value = static_cast<size_t>(literal->number);
        if (value != literal->number) {
          Error("non-integral array length").Position(field.pos);
        }
        field_size *= value;
      } else {
        field_size *= ResidueClass::Unknown();
      }
    }
    field.ValidateAlignment(class_offset);
    class_offset += field_size;
    // In-object properties are not considered part of the header.
    if (class_offset.SingleValue() && !class_type->IsShape()) {
      header_size = *class_offset.SingleValue();
    }
    if (!field.index && !class_offset.SingleValue()) {
      Error("Indexed fields have to be at the end of the object")
          .Position(field.pos);
    }
  }
  DCHECK_GT(header_size, 0);
  class_type->header_size_ = header_size;
  if ((!super_class || super_class->size()) && !seen_indexed_field) {
    DCHECK_GE(class_offset, header_size);
    class_type->size_ = class_offset;
  }
  class_type->size_ = class_offset;
  class_type->GenerateAccessors();
  DeclareMethods(class_type, class_declaration->methods);
}
@@ -4,12 +4,14 @@

#include <iostream>

#include "src/torque/types.h"

#include "src/base/bits.h"
#include "src/torque/ast.h"
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
#include "src/torque/type-oracle.h"
#include "src/torque/type-visitor.h"
#include "src/torque/types.h"

namespace v8 {
namespace internal {
@@ -363,23 +365,8 @@ std::string StructType::GetGeneratedTypeNameImpl() const {

size_t StructType::PackedSize() const {
  size_t result = 0;
  if (!fields_.empty()) {
    const Field& last = fields_.back();
    if (last.offset == Field::kInvalidOffset) {
      // This struct can't be packed. Find the first invalid field and use its
      // name and position for the error.
      for (const Field& field : fields_) {
        if (field.offset == Field::kInvalidOffset) {
          Error("Cannot compute packed size of ", ToString(), " due to field ",
                field.name_and_type.name, " of unknown size")
              .Position(field.pos);
          return 0;
        }
      }
    }
    size_t field_size = 0;
    std::tie(field_size, std::ignore) = last.GetFieldSizeInformation();
    result = last.offset + field_size;
  for (const Field& field : fields()) {
    result += std::get<0>(field.GetFieldSizeInformation());
  }
  return result;
}
@ -446,6 +433,7 @@ ClassType::ClassType(const Type* parent, Namespace* nspace,
|
||||
const std::string& generates, const ClassDeclaration* decl,
|
||||
const TypeAlias* alias)
|
||||
: AggregateType(Kind::kClassType, parent, nspace, name),
|
||||
size_(ResidueClass::Unknown()),
|
||||
flags_(flags & ~(kInternalFlags)),
|
||||
generates_(generates),
|
||||
decl_(decl),
|
||||
@ -741,6 +729,71 @@ std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
|
||||
return std::make_tuple(0, "#no size");
|
||||
}
|
||||
|
||||
size_t Type::AlignmentLog2() const {
|
||||
if (parent()) return parent()->AlignmentLog2();
|
||||
return TargetArchitecture::TaggedSize();
|
||||
}
|
||||
|
||||
size_t AbstractType::AlignmentLog2() const {
|
||||
size_t alignment;
|
||||
if (this == TypeOracle::GetTaggedType()) {
|
||||
alignment = TargetArchitecture::TaggedSize();
|
||||
} else if (this == TypeOracle::GetRawPtrType()) {
|
||||
alignment = TargetArchitecture::RawPtrSize();
|
||||
} else if (this == TypeOracle::GetVoidType()) {
|
||||
alignment = 1;
|
||||
} else if (this == TypeOracle::GetInt8Type()) {
|
||||
alignment = kUInt8Size;
|
||||
} else if (this == TypeOracle::GetUint8Type()) {
|
||||
alignment = kUInt8Size;
|
||||
} else if (this == TypeOracle::GetInt16Type()) {
|
||||
alignment = kUInt16Size;
|
||||
} else if (this == TypeOracle::GetUint16Type()) {
|
||||
alignment = kUInt16Size;
|
||||
} else if (this == TypeOracle::GetInt32Type()) {
|
||||
alignment = kInt32Size;
|
||||
} else if (this == TypeOracle::GetUint32Type()) {
|
||||
alignment = kInt32Size;
|
||||
} else if (this == TypeOracle::GetFloat64Type()) {
|
||||
alignment = kDoubleSize;
|
||||
} else if (this == TypeOracle::GetIntPtrType()) {
|
||||
alignment = TargetArchitecture::RawPtrSize();
|
||||
} else if (this == TypeOracle::GetUIntPtrType()) {
|
||||
alignment = TargetArchitecture::RawPtrSize();
|
||||
} else {
|
||||
return Type::AlignmentLog2();
|
||||
}
|
||||
alignment = std::min(alignment, TargetArchitecture::TaggedSize());
|
||||
return base::bits::WhichPowerOfTwo(alignment);
|
||||
}
|
||||
|
||||
size_t StructType::AlignmentLog2() const {
|
||||
size_t alignment_log_2 = 0;
|
||||
for (const Field& field : fields()) {
|
||||
alignment_log_2 =
|
||||
std::max(alignment_log_2, field.name_and_type.type->AlignmentLog2());
|
||||
}
|
||||
return alignment_log_2;
|
||||
}
|
||||
|
||||
void Field::ValidateAlignment(ResidueClass at_offset) const {
|
||||
const Type* type = name_and_type.type;
|
||||
if (const StructType* struct_type = StructType::DynamicCast(type)) {
|
||||
for (const Field& field : struct_type->fields()) {
|
||||
field.ValidateAlignment(at_offset);
|
||||
size_t field_size = std::get<0>(field.GetFieldSizeInformation());
|
||||
at_offset += field_size;
|
||||
}
|
||||
} else {
|
||||
size_t alignment_log_2 = name_and_type.type->AlignmentLog2();
|
||||
if (at_offset.AlignmentLog2() < alignment_log_2) {
|
||||
Error("field ", name_and_type.name, " at offset ", at_offset, " is not ",
|
||||
size_t{1} << alignment_log_2, "-byte aligned.")
|
||||
.Position(pos);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
|
||||
std::string size_string;
|
||||
size_t size;
|
||||
|
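The struct branch of Field::ValidateAlignment is worth a worked example: a struct embedded in a class is valid at an offset iff each member is suitably aligned at the offset plus the sizes of all preceding members. A minimal sketch follows, assuming plain size_t offsets instead of ResidueClass and treating each member's size as its required alignment; the member names and layouts are invented.

// Standalone sketch, not V8 code: names and sizes are invented; a plain
// size_t offset replaces ResidueClass for simplicity.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Member {
  const char* name;
  size_t size;  // also the required alignment in this sketch
};

// Mirrors the struct branch above: check each member at the running offset,
// then advance past it.
bool ValidateStructAlignment(size_t offset,
                             const std::vector<Member>& members) {
  bool ok = true;
  for (const Member& m : members) {
    if (offset % m.size != 0) {
      std::printf("member %s at offset %zu is not %zu-byte aligned\n", m.name,
                  offset, m.size);
      ok = false;
    }
    offset += m.size;
  }
  return ok;
}

int main() {
  // Two 4-byte members embedded at a tagged-aligned offset: fine.
  assert(ValidateStructAlignment(8, {{"x", 4}, {"y", 4}}));
  // An 8-byte member preceded by a single byte lands at offset 9: rejected.
  assert(!ValidateStructAlignment(8, {{"tag", 1}, {"value", 8}}));
}
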
@ -137,6 +137,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
  const MaybeSpecializationKey& GetSpecializedFrom() const {
    return specialized_from_;
  }

  static base::Optional<const Type*> MatchUnaryGeneric(const Type* type,
                                                       GenericType* generic);

@ -152,6 +153,8 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
    return nullptr;
  }

  virtual size_t AlignmentLog2() const;

 protected:
  Type(TypeBase::Kind kind, const Type* parent,
       MaybeSpecializationKey specialized_from = base::nullopt);

@ -197,6 +200,8 @@ struct Field {
  // reliance of string types is quite clunky.
  std::tuple<size_t, std::string> GetFieldSizeInformation() const;

  void ValidateAlignment(ResidueClass at_offset) const;

  SourcePosition pos;
  const AggregateType* aggregate;
  base::Optional<Expression*> index;

@ -205,15 +210,14 @@
  // The byte offset of this field from the beginning of the containing class
  // or struct. Most structs are never packed together in memory, and are only
  // used to hold a batch of related CSA TNode values, in which case |offset|
  // is irrelevant. In structs, this value can be set to kInvalidOffset to
  // indicate that the struct should never be used in packed form.
  size_t offset;
  // is irrelevant.
  // The offset may be unknown because the field is after an indexed field or
  // because we don't support the struct field for on-heap layouts.
  base::Optional<size_t> offset;

  bool is_weak;
  bool const_qualified;
  bool generate_verify;

  static constexpr size_t kInvalidOffset = SIZE_MAX;
};

std::ostream& operator<<(std::ostream& os, const Field& name_and_type);

@ -269,6 +273,8 @@ class AbstractType final : public Type {

  std::vector<RuntimeType> GetRuntimeTypes() const override;

  size_t AlignmentLog2() const override;

 private:
  friend class TypeOracle;
  AbstractType(const Type* parent, AbstractTypeFlags flags,

@ -564,9 +570,11 @@ class StructType final : public AggregateType {

  std::string GetGeneratedTypeNameImpl() const override;

  // Returns the sum of the size of all members. Does not validate alignment.
  // Returns the sum of the size of all members.
  size_t PackedSize() const;

  size_t AlignmentLog2() const override;

 private:
  friend class TypeOracle;
  StructType(Namespace* nspace, const StructDeclaration* decl,

@ -614,7 +622,7 @@ class ClassType final : public AggregateType {
    if (!is_finalized_) Finalize();
    return header_size_;
  }
  base::Optional<size_t> size() const {
  ResidueClass size() const {
    if (!is_finalized_) Finalize();
    return size_;
  }

@ -656,7 +664,7 @@
                   const ClassDeclaration* decl, const TypeAlias* alias);

  size_t header_size_;
  base::Optional<size_t> size_;
  ResidueClass size_;
  mutable ClassFlags flags_;
  const std::string generates_;
  const ClassDeclaration* decl_;

@ -7,6 +7,7 @@
#include <iostream>
#include <string>

#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/torque/ast.h"
#include "src/torque/declarable.h"

@ -357,6 +358,18 @@ IncludeObjectMacrosScope::~IncludeObjectMacrosScope() {
  os_ << "\n#include \"src/objects/object-macros-undef.h\"\n";
}

size_t ResidueClass::AlignmentLog2() const {
  if (value_ == 0) return modulus_log_2_;
  return base::bits::CountTrailingZeros(value_);
}

const size_t ResidueClass::kMaxModulusLog2;

std::ostream& operator<<(std::ostream& os, const ResidueClass& a) {
  if (a.SingleValue().has_value()) return os << *a.SingleValue();
  return os << "[" << a.value_ << " mod 2^" << a.modulus_log_2_ << "]";
}

}  // namespace torque
}  // namespace internal
}  // namespace v8

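A quick sanity check of the trailing-zeros rule above: every element of {12 + k*2^8 | k in Z} is divisible by 4 because 12 = 0b1100 has two trailing zero bits, while a representative of 0 tells us nothing beyond the modulus itself. This spot check is not V8 code; the GCC/Clang builtin __builtin_ctz stands in for base::bits::CountTrailingZeros.

// Spot check, not V8 code; __builtin_ctz is GCC/Clang-specific.
#include <cstdio>

int main() {
  unsigned value = 12;              // representative of [12 mod 2^8]
  int log2 = __builtin_ctz(value);  // 12 = 0b1100 -> 2 trailing zeros
  std::printf("alignment of [12 mod 2^8] is 2^%d = %d\n", log2, 1 << log2);
  // Check: 12, 268, 524, ... are all divisible by 4 but not by 8.
  for (unsigned k = 0; k < 4; ++k) {
    unsigned element = 12 + k * 256;
    std::printf("%u %% 4 = %u, %u %% 8 = %u\n", element, element % 4, element,
                element % 8);
  }
}
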
@ -407,6 +407,87 @@ class IncludeObjectMacrosScope {
  std::ostream& os_;
};

// A value of ResidueClass is a congruence class of integers modulo a power
// of 2.
// In contrast to common modulo arithmetic, we also allow addition and
// multiplication of congruence classes with different moduli. In this case,
// we do an abstract-interpretation style approximation to produce as small a
// congruence class as possible. ResidueClass is used to represent partial
// knowledge about offsets and sizes to validate alignment constraints.
// ResidueClass(x,m) = {y \in Z | x == y mod 2^m} = {x+k2^m | k \in Z} where Z
// is the set of all integers.
// Notation: 2^x is 2 to the power of x.
class ResidueClass {
 public:
  ResidueClass(size_t value, size_t modulus_log_2 =
                                 kMaxModulusLog2)  // NOLINT(runtime/explicit)
      : value_(value),
        modulus_log_2_(std::min(modulus_log_2, kMaxModulusLog2)) {
    if (modulus_log_2_ < kMaxModulusLog2) {
      value_ %= size_t{1} << modulus_log_2_;
    }
  }

  // 0 modulo 1, in other words, the class of all integers.
  static ResidueClass Unknown() { return ResidueClass{0, 0}; }

  // If the modulus corresponds to the size of size_t, it represents a concrete
  // value.
  base::Optional<size_t> SingleValue() const {
    if (modulus_log_2_ == kMaxModulusLog2) return value_;
    return base::nullopt;
  }

  friend ResidueClass operator+(const ResidueClass& a, const ResidueClass& b) {
    return ResidueClass{a.value_ + b.value_,
                        std::min(a.modulus_log_2_, b.modulus_log_2_)};
  }

  // Reasoning for the choice of the new modulus:
  // {x+k2^a | k \in Z} * {y+l2^b | l \in Z}
  // = {xy + xl2^b + yk2^a + kl2^(a+b) | k,l \in Z},
  // which is a subset of {xy + k2^c | k \in Z}
  // if 2^c is a common divisor of x2^b, y2^a and hence also of 2^(a+b) since
  // x<2^a and y<2^b.
  // So we use the gcd of x2^b and y2^a as the new modulus.
  friend ResidueClass operator*(const ResidueClass& a, const ResidueClass& b) {
    return ResidueClass{a.value_ * b.value_,
                        std::min(a.modulus_log_2_ + b.AlignmentLog2(),
                                 b.modulus_log_2_ + a.AlignmentLog2())};
  }

  friend std::ostream& operator<<(std::ostream& os, const ResidueClass& a);

  ResidueClass& operator+=(const ResidueClass& other) {
    *this = *this + other;
    return *this;
  }

  ResidueClass& operator*=(const ResidueClass& other) {
    *this = *this * other;
    return *this;
  }

  // 2^AlignmentLog2() is the largest power of 2 that divides all elements of
  // the congruence class.
  size_t AlignmentLog2() const;
  size_t Alignment() const {
    DCHECK_LT(AlignmentLog2(), kMaxModulusLog2);
    return size_t{1} << AlignmentLog2();
  }

 private:
  // The value is the representative of the congruence class. It's always
  // smaller than 2^modulus_log_2_.
  size_t value_;
  // Base 2 logarithm of the modulus.
  size_t modulus_log_2_;

  // size_t values are modulo 2^kMaxModulusLog2, so we don't consider larger
  // moduli.
  static const size_t kMaxModulusLog2 = 8 * sizeof(size_t);
};

}  // namespace torque
}  // namespace internal
}  // namespace v8

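The modulus rule for multiplication is the subtlest part of ResidueClass, so here is a numeric spot check (not V8 code, just the comment's formula replayed with concrete numbers): for a = [4 mod 2^3] and b = [2 mod 2^2], AlignmentLog2 is 2 and 1 respectively, so the rule picks min(3+1, 2+2) = 4 and predicts that every product of an element of a with an element of b lies in [8 mod 2^4].

// Numeric spot check of the multiplication rule, not V8 code.
#include <cassert>
#include <cstdio>

int main() {
  const unsigned mod_a = 1u << 3;     // modulus of a = [4 mod 2^3]
  const unsigned mod_b = 1u << 2;     // modulus of b = [2 mod 2^2]
  const unsigned mod_prod = 1u << 4;  // min(3 + ctz(2), 2 + ctz(4)) = 4
  for (unsigned k = 0; k < 8; ++k) {
    for (unsigned l = 0; l < 8; ++l) {
      unsigned product = (4 + k * mod_a) * (2 + l * mod_b);
      assert(product % mod_prod == 8);  // every product is 8 mod 16
    }
  }
  std::printf("all sampled products are congruent to 8 mod 16\n");
}
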
@ -3507,8 +3507,7 @@ TEST(SmallOrderedHashMapAllocate) {
  {
    CodeStubAssembler m(asm_tester.state());
    TNode<Smi> capacity = m.CAST(m.Parameter(0));
    m.Return(m.AllocateSmallOrderedHashTable<SmallOrderedHashMap>(
        m.SmiToIntPtr(capacity)));
    m.Return(m.AllocateSmallOrderedHashMap(m.SmiToIntPtr(capacity)));
  }
  FunctionTester ft(asm_tester.GenerateCode(), kNumParams);

@ -3546,8 +3545,7 @@ TEST(SmallOrderedHashSetAllocate) {
  {
    CodeStubAssembler m(asm_tester.state());
    TNode<Smi> capacity = m.CAST(m.Parameter(0));
    m.Return(m.AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
        m.SmiToIntPtr(capacity)));
    m.Return(m.AllocateSmallOrderedHashSet(m.SmiToIntPtr(capacity)));
  }
  FunctionTester ft(asm_tester.GenerateCode(), kNumParams);

@ -80,6 +80,8 @@ extern macro TaggedToSmi(Object): Smi
extern macro TaggedToHeapObject(Object): HeapObject
    labels CastError;

extern macro IntPtrConstant(constexpr int31): intptr;

macro FromConstexpr<To: type, From: type>(o: From): To;
FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
  return SmiConstant(s);

@ -87,6 +89,9 @@ FromConstexpr<Smi, constexpr Smi>(s: constexpr Smi): Smi {
FromConstexpr<Smi, constexpr int31>(s: constexpr int31): Smi {
  return %FromConstexpr<Smi>(s);
}
FromConstexpr<intptr, constexpr int31>(i: constexpr int31): intptr {
  return IntPtrConstant(i);
}

macro Cast<A : type extends Object>(implicit context: Context)(o: Object): A
    labels CastError {