[torque] properly support holes in FixedDoubleArray
Bug: v8:7793
Change-Id: I4fc039711eb9aa9d551144ea6fccc926d4803349
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1993290
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65808}
parent: b34e5b42d2
commit: 6e5e0aed55
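
Context for the diff below: the change replaces the loose LoadDoubleWithHoleCheck / StoreFixedDoubleArrayHoleSmi externs with a first-class float64_or_hole type, so FixedDoubleArray elements carry their hole-ness in the type system. In memory, the hole is one reserved signalling-NaN bit pattern. A standalone C++ sketch of that encoding (illustrative only, not V8 source; the constant mirrors V8's kHoleNanInt64 at this revision):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr uint64_t kHoleNanInt64 = 0xFFF7FFFF'FFF7FFFFull;

// Bitwise hole test; a plain NaN check (d != d) would also match real NaNs.
bool IsHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits == kHoleNanInt64;
}

int main() {
  double hole;
  std::memcpy(&hole, &kHoleNanInt64, sizeof(hole));
  // The quiet bit (mantissa MSB) is clear in 0xFFF7..., so this is a
  // signalling NaN, which ordinary floating-point arithmetic never produces.
  std::printf("isnan=%d hole=%d one=%d\n", std::isnan(hole), IsHole(hole),
              IsHole(1.0));
}

Because hardware arithmetic only ever produces quiet NaNs, this signalling pattern can be reserved for the hole without colliding with any real element value.
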
@@ -45,8 +45,8 @@ namespace array {
   const array: JSArray = UnsafeCast<JSArray>(receiver);
   const fixedDoubleArray: FixedDoubleArray =
       UnsafeCast<FixedDoubleArray>(array.elements);
-  const element: float64 = LoadDoubleWithHoleCheck(fixedDoubleArray, k)
-      otherwise return kEmptyString;
+  const element: float64 =
+      fixedDoubleArray.floats[k].Value() otherwise return kEmptyString;
   return AllocateHeapNumberWithValue(element);
 }
@@ -20,8 +20,7 @@ namespace array {
     elements: FixedArrayBase, index: Smi): JSAny
     labels IfHole {
   const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
-  const element: float64 = LoadDoubleWithHoleCheck(elements, index)
-      otherwise IfHole;
+  const element: float64 = elements.floats[index].Value() otherwise IfHole;
   return AllocateHeapNumberWithValue(element);
 }
@@ -130,7 +130,7 @@ namespace array {
       typeswitch (
           UnsafeCast<(Number | TheHole)>(this.fixedArray.objects[i])) {
         case (n: Number): {
-          elements.floats[i] = Convert<float64>(n);
+          elements.floats[i] = Convert<float64_or_hole>(n);
         }
         case (TheHole): {
         }
@@ -24,8 +24,7 @@ namespace array {
   const elements: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
   // This macro is only used for PACKED_DOUBLE, loading the hole should
   // be impossible.
-  return LoadDoubleWithHoleCheck(elements, index)
-      otherwise unreachable;
+  return elements.floats[index].Value() otherwise unreachable;
 }

 macro StoreElement<ElementsAccessor : type extends ElementsKind, T: type>(
@@ -39,12 +39,12 @@ namespace array {
 }

 macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
-  const f: float64 = LoadDoubleWithHoleCheck(a, i) otherwise return Undefined;
+  const f: float64 = a.floats[i].Value() otherwise return Undefined;
   return AllocateHeapNumberWithValue(f);
 }

 macro StoreArrayHole(elements: FixedDoubleArray, k: Smi): void {
-  StoreFixedDoubleArrayHoleSmi(elements, k);
+  elements.floats[k] = kDoubleHole;
 }

 macro StoreArrayHole(elements: FixedArray, k: Smi): void {
@@ -104,6 +104,27 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
 type bint generates 'TNode<BInt>' constexpr 'BInt';
 type string constexpr 'const char*';

+// WARNING: The memory representation (i.e., in class fields and arrays) of
+// float64_or_hole is just a float64 that may be the hole-representing
+// signalling-NaN bit pattern. So its memory size is that of float64, and
+// loading and storing float64_or_hole emits special code.
+struct float64_or_hole {
+  macro Value(): float64 labels IfHole {
+    if (this.is_hole) {
+      goto IfHole;
+    }
+    return this.value;
+  }
+  macro ValueUnsafeAssumeNotHole(): float64 {
+    assert(!this.is_hole);
+    return this.value;
+  }
+
+  is_hole: bool;
+  value: float64;
+}
+const kDoubleHole: float64_or_hole = float64_or_hole{is_hole: true, value: 0};
+
 // The HashTable inheritance hierarchy doesn't actually look like this in C++
 // because it uses some class templates that we can't yet (and may never)
 // express in Torque, but this is the expected organization of instance types.
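
As a reading aid, a plain C++ analogue of the two accessors above (illustrative only, not V8 code; Torque's `labels IfHole` exit is modeled with std::optional, and the 8-byte in-memory encoding described in the WARNING is deliberately not reproduced here):

#include <cassert>
#include <optional>

struct Float64OrHole {
  bool is_hole;
  double value;

  // Torque: macro Value(): float64 labels IfHole
  std::optional<double> Value() const {
    if (is_hole) return std::nullopt;  // corresponds to "goto IfHole"
    return value;
  }

  // Torque: macro ValueUnsafeAssumeNotHole(): float64
  double ValueUnsafeAssumeNotHole() const {
    assert(!is_hole);
    return value;
  }
};

int main() {
  Float64OrHole x{false, 1.5}, hole{true, 0};
  assert(x.Value().has_value() && *x.Value() == 1.5);
  assert(!hole.Value().has_value());
}

Note this C++ struct occupies 16 bytes; the Torque type is still stored as a bare 8-byte double, which is why the compiler changes further down special-case its size, alignment, loads, and stores.
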
@@ -176,6 +197,7 @@ const kVariableSizeSentinel:
     constexpr int31 generates 'kVariableSizeSentinel';

 const kSmiTagSize: constexpr int31 generates 'kSmiTagSize';
+const kHeapObjectTag: constexpr int31 generates 'kHeapObjectTag';
 const V8_INFINITY: constexpr float64 generates 'V8_INFINITY';
 const MINUS_V8_INFINITY: constexpr float64 generates '-V8_INFINITY';
@@ -206,6 +206,12 @@ Convert<uintptr, Number>(n: Number): uintptr {
 Convert<float64, float32>(f: float32): float64 {
   return ChangeFloat32ToFloat64(f);
 }
+Convert<float64_or_hole, float64>(f: float64): float64_or_hole {
+  return float64_or_hole{is_hole: false, value: f};
+}
+Convert<float64_or_hole, Number>(n: Number): float64_or_hole {
+  return Convert<float64_or_hole>(Convert<float64>(n));
+}
 Convert<float32, float64>(f: float64): float32 {
   return TruncateFloat64ToFloat32(f);
 }
@@ -417,7 +417,7 @@ namespace math {
       oneArgIsNaN = true;
     } else {
       const absValue = Float64Abs(value);
-      absValues.floats[i] = absValue;
+      absValues.floats[i] = Convert<float64_or_hole>(absValue);
       if (absValue > max) {
         max = absValue;
       }
@@ -437,7 +437,7 @@ namespace math {
   let sum: float64 = 0;
   let compensation: float64 = 0;
   for (let i: intptr = 0; i < length; ++i) {
-    const n = absValues.floats[i] / max;
+    const n = absValues.floats[i].ValueUnsafeAssumeNotHole() / max;
     const summand = n * n - compensation;
     const preliminary = sum + summand;
     compensation = (preliminary - sum) - summand;
@@ -464,7 +464,8 @@ namespace math {
   const array: FixedDoubleArray = Cast<FixedDoubleArray>(
       context[NativeContextSlot::MATH_RANDOM_CACHE_INDEX])
       otherwise unreachable;
-  const random: float64 = array.floats[Convert<intptr>(newSmiIndex)];
+  const random: float64 =
+      array.floats[Convert<intptr>(newSmiIndex)].ValueUnsafeAssumeNotHole();
   return AllocateHeapNumberWithValue(random);
 }
 }
@@ -152,6 +152,23 @@ namespace torque_internal {
       _target: Slice<char8>, _originIterator: UninitializedIterator) {}
   InitializeFieldsFromIterator<char16, UninitializedIterator>(
       _target: Slice<char16>, _originIterator: UninitializedIterator) {}
+
+  extern macro IsDoubleHole(HeapObject, intptr): bool;
+  extern macro StoreDoubleHole(HeapObject, intptr);
+
+  macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole {
+    return float64_or_hole{
+      is_hole: IsDoubleHole(r.object, r.offset - kHeapObjectTag),
+      value: * UnsafeNewReference<float64>(r.object, r.offset)
+    };
+  }
+  macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole) {
+    if (value.is_hole) {
+      StoreDoubleHole(r.object, r.offset - kHeapObjectTag);
+    } else {
+      * UnsafeNewReference<float64>(r.object, r.offset) = value.value;
+    }
+  }
 }  // namespace torque_internal

 // Indicates that an array-field should not be initialized.
@@ -2575,23 +2575,26 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
   return var_result.value();
 }

-TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
-    SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
-    MachineType machine_type) {
-  if (if_hole) {
+TNode<BoolT> CodeStubAssembler::IsDoubleHole(TNode<Object> base,
+                                             TNode<IntPtrT> offset) {
   // TODO(ishell): Compare only the upper part for the hole once the
   // compiler is able to fold addition of already complex |offset| with
   // |kIeeeDoubleExponentWordOffset| into one addressing mode.
   if (Is64()) {
     TNode<Uint64T> element = Load<Uint64T>(base, offset);
-    GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole);
+    return Word64Equal(element, Int64Constant(kHoleNanInt64));
   } else {
     TNode<Uint32T> element_upper = Load<Uint32T>(
-        base,
-        IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
-    GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
-           if_hole);
+        base, IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
+    return Word32Equal(element_upper, Int32Constant(kHoleNanUpper32));
   }
 }
+
+TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
+    SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
+    MachineType machine_type) {
+  if (if_hole) {
+    GotoIf(IsDoubleHole(base, offset), if_hole);
+  }
   if (machine_type.IsNone()) {
     // This means the actual value is not needed.
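
Per the TODO above, the 64-bit path compares the whole word only because the extra offset addition does not yet fold into one addressing mode; detecting the hole actually needs just the upper (exponent) word, which is what the 32-bit path checks. A standalone sketch of that upper-word test (illustrative, not V8 code; assumes little-endian layout, matching kIeeeDoubleExponentWordOffset == 4 there):

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
constexpr int kIeeeDoubleExponentWordOffset = 4;  // little-endian

bool IsHoleUpperWordOnly(const unsigned char* slot) {
  uint32_t upper;
  std::memcpy(&upper, slot + kIeeeDoubleExponentWordOffset, sizeof(upper));
  // Sufficient because quiet NaNs from arithmetic have the quiet bit set in
  // this word, so no ordinary double ever carries this exponent word.
  return upper == kHoleNanUpper32;
}

int main() {
  unsigned char slot[8];
  const uint64_t kHoleNanInt64 = 0xFFF7FFFF'FFF7FFFFull;
  std::memcpy(slot, &kHoleNanInt64, sizeof(slot));
  std::printf("hole detected: %d\n", IsHoleUpperWordOnly(slot));
}
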
@@ -4382,6 +4385,26 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array,
                          mode);
 }

+void CodeStubAssembler::StoreDoubleHole(TNode<HeapObject> object,
+                                        TNode<IntPtrT> offset) {
+  TNode<UintPtrT> double_hole =
+      Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
+             : ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
+  // TODO(danno): When we have a Float32/Float64 wrapper class that
+  // preserves double bits during manipulation, remove this code/change
+  // this to an indexed Float64 store.
+  if (Is64()) {
+    StoreNoWriteBarrier(MachineRepresentation::kWord64, object, offset,
+                        double_hole);
+  } else {
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, object, offset,
+                        double_hole);
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, object,
+                        IntPtrAdd(offset, IntPtrConstant(kInt32Size)),
+                        double_hole);
+  }
+}
+
 void CodeStubAssembler::StoreFixedDoubleArrayHole(
     TNode<FixedDoubleArray> array, Node* index, ParameterMode parameter_mode) {
   CSA_SLOW_ASSERT(this, MatchesParameterMode(index, parameter_mode));
@@ -4391,22 +4414,7 @@ void CodeStubAssembler::StoreFixedDoubleArrayHole(
   CSA_ASSERT(this, IsOffsetInBounds(
                        offset, LoadAndUntagFixedArrayBaseLength(array),
                        FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS));
-  TNode<UintPtrT> double_hole =
-      Is64() ? ReinterpretCast<UintPtrT>(Int64Constant(kHoleNanInt64))
-             : ReinterpretCast<UintPtrT>(Int32Constant(kHoleNanLower32));
-  // TODO(danno): When we have a Float32/Float64 wrapper class that
-  // preserves double bits during manipulation, remove this code/change
-  // this to an indexed Float64 store.
-  if (Is64()) {
-    StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
-                        double_hole);
-  } else {
-    StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
-                        double_hole);
-    StoreNoWriteBarrier(MachineRepresentation::kWord32, array,
-                        IntPtrAdd(offset, IntPtrConstant(kInt32Size)),
-                        double_hole);
-  }
+  StoreDoubleHole(array, offset);
 }

 void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
@@ -1464,6 +1464,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     return LoadDoubleWithHoleCheck(array, Signed(index), if_hole);
   }

+  TNode<BoolT> IsDoubleHole(TNode<Object> base, TNode<IntPtrT> offset);
   // Load Float64 value by |base| + |offset| address. If the value is a double
   // hole then jump to |if_hole|. If |machine_type| is None then only the hole
   // check is generated.
@@ -1715,6 +1716,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     StoreFixedDoubleArrayElement(object, index, value, SMI_PARAMETERS);
   }

+  void StoreDoubleHole(TNode<HeapObject> object, TNode<IntPtrT> offset);
   void StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array, Node* index,
                                  ParameterMode mode = INTPTR_PARAMETERS);
   void StoreFixedDoubleArrayHoleSmi(TNode<FixedDoubleArray> array,
@@ -12,7 +12,7 @@ extern class FixedArray extends FixedArrayBase { objects[length]: Object; }
 type EmptyFixedArray extends FixedArray;

 extern class FixedDoubleArray extends FixedArrayBase {
-  floats[length]: float64;
+  floats[length]: float64_or_hole;
 }

 extern class WeakFixedArray extends HeapObject { length: Smi; }
@@ -77,20 +77,12 @@ extern operator '.floats[]' macro LoadFixedDoubleArrayElement(
-    FixedDoubleArray, intptr): float64;
 operator '[]=' macro StoreFixedDoubleArrayDirect(
     a: FixedDoubleArray, i: Smi, v: Number) {
-  a.floats[i] = Convert<float64>(v);
+  a.floats[i] = Convert<float64_or_hole>(Convert<float64>(v));
 }
 operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
   a.objects[i] = v;
 }

-extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
-    labels IfHole;
-extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, intptr): float64
-    labels IfHole;
-extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, uintptr): float64
-    labels IfHole;
-extern macro StoreFixedDoubleArrayHoleSmi(FixedDoubleArray, Smi): void;
-
 extern macro AllocateZeroedFixedArray(intptr): FixedArray;
 extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray;
 extern macro CalculateNewElementsCapacity(Smi): Smi;
@@ -68,9 +68,8 @@ macro LoadElementNoHole<T : type extends FixedArrayBase>(
 LoadElementNoHole<FixedArray>(implicit context: Context)(
     a: JSArray, index: Smi): JSAny
     labels IfHole {
-  try {
-    const elements: FixedArray =
-        Cast<FixedArray>(a.elements) otherwise Unexpected;
-    const e = UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
-    typeswitch (e) {
-      case (TheHole): {
+  const elements: FixedArray =
+      Cast<FixedArray>(a.elements) otherwise unreachable;
+  const e = UnsafeCast<(JSAny | TheHole)>(elements.objects[index]);
+  typeswitch (e) {
+    case (TheHole): {
@@ -80,25 +79,15 @@ LoadElementNoHole<FixedArray>(implicit context: Context)(
-        return e;
-      }
-    }
-  }
-  label Unexpected {
-    unreachable;
-  }
-}
-
-LoadElementNoHole<FixedDoubleArray>(implicit context: Context)(
-    a: JSArray, index: Smi): JSAny
-    labels IfHole {
-  try {
-    const elements: FixedDoubleArray =
-        Cast<FixedDoubleArray>(a.elements) otherwise Unexpected;
-    const e: float64 =
-        LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
-    return AllocateHeapNumberWithValue(e);
-  }
-  label Unexpected {
-    unreachable;
-  }
-}
-
+      return e;
+    }
+  }
+}
+
+LoadElementNoHole<FixedDoubleArray>(implicit context: Context)(
+    a: JSArray, index: Smi): JSAny
+    labels IfHole {
+  const elements: FixedDoubleArray =
+      Cast<FixedDoubleArray>(a.elements) otherwise unreachable;
+  const e: float64 = elements.floats[index].Value() otherwise IfHole;
+  return AllocateHeapNumberWithValue(e);
+}
+
 extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray;
@@ -176,11 +165,11 @@ struct FastJSArrayWitness {
     if (this.hasDoubles) {
       const elements = Cast<FixedDoubleArray>(this.unstable.elements)
           otherwise unreachable;
-      StoreFixedDoubleArrayHoleSmi(elements, k);
+      elements.floats[k] = kDoubleHole;
     } else {
       const elements = Cast<FixedArray>(this.unstable.elements)
          otherwise unreachable;
-      StoreFixedArrayElement(elements, k, TheHole);
+      elements.objects[k] = TheHole;
     }
   }
@@ -79,7 +79,8 @@ class ValueTypeFieldsRange {
   ValueTypeFieldIterator begin() { return {type_, 0}; }
   ValueTypeFieldIterator end() {
     size_t index = 0;
-    if (const StructType* struct_type = StructType::DynamicCast(type_)) {
+    const StructType* struct_type = StructType::DynamicCast(type_);
+    if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
       index = struct_type->fields().size();
     }
     if (const BitFieldStructType* bit_field_struct_type =
@@ -44,13 +44,21 @@ static const char* const NUMBER_TYPE_STRING = "Number";
 static const char* const BUILTIN_POINTER_TYPE_STRING = "BuiltinPtr";
 static const char* const INTPTR_TYPE_STRING = "intptr";
 static const char* const UINTPTR_TYPE_STRING = "uintptr";
+static const char* const INT64_TYPE_STRING = "int64";
 static const char* const INT31_TYPE_STRING = "int31";
 static const char* const INT32_TYPE_STRING = "int32";
 static const char* const UINT31_TYPE_STRING = "uint31";
 static const char* const UINT32_TYPE_STRING = "uint32";
+static const char* const INT16_TYPE_STRING = "int16";
+static const char* const UINT16_TYPE_STRING = "uint16";
+static const char* const INT8_TYPE_STRING = "int8";
+static const char* const UINT8_TYPE_STRING = "uint8";
 static const char* const BINT_TYPE_STRING = "bint";
+static const char* const CHAR8_TYPE_STRING = "char8";
+static const char* const CHAR16_TYPE_STRING = "char16";
 static const char* const FLOAT32_TYPE_STRING = "float32";
 static const char* const FLOAT64_TYPE_STRING = "float64";
+static const char* const FLOAT64_OR_HOLE_TYPE_STRING = "float64_or_hole";
 static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
 static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
 static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
@@ -2149,10 +2149,27 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
   } else if (reference.IsVariableAccess()) {
     return GenerateCopy(reference.variable());
   } else if (reference.IsHeapReference()) {
-    GenerateCopy(reference.heap_reference());
-    assembler().Emit(LoadReferenceInstruction{reference.ReferencedType()});
-    DCHECK_EQ(1, LoweredSlotCount(reference.ReferencedType()));
-    return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
+    const Type* referenced_type = reference.ReferencedType();
+    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
+      return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
+                                        "LoadFloat64OrHole"),
+                          Arguments{{reference.heap_reference()}, {}});
+    } else if (auto* struct_type = StructType::DynamicCast(referenced_type)) {
+      StackRange result_range = assembler().TopRange(0);
+      for (const Field& field : struct_type->fields()) {
+        StackScope scope(this);
+        const std::string& fieldname = field.name_and_type.name;
+        VisitResult field_value = scope.Yield(GenerateFetchFromLocation(
+            GenerateFieldAccess(reference, fieldname)));
+        result_range.Extend(field_value.stack_range());
+      }
+      return VisitResult(referenced_type, result_range);
+    } else {
+      GenerateCopy(reference.heap_reference());
+      assembler().Emit(LoadReferenceInstruction{reference.ReferencedType()});
+      DCHECK_EQ(1, LoweredSlotCount(reference.ReferencedType()));
+      return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
+    }
   } else if (reference.IsBitFieldAccess()) {
     // First fetch the bitfield struct, then get the bits out of it.
     VisitResult bit_field_struct =
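
The struct branch above is the general lowering: a struct-typed fetch becomes one fetch per field, concatenated into a single flat stack range, while float64_or_hole is intercepted first and fetched through the torque_internal::LoadFloat64OrHole macro instead. A toy model of the field-wise lowering (simplified illustration, not the Torque compiler):

#include <cstdio>
#include <string>
#include <vector>

struct Field { std::string name; std::vector<int> slots; };

// Fetching a struct yields the concatenation of its fields' lowered slots,
// mirroring result_range.Extend(field_value.stack_range()) above.
std::vector<int> FetchStruct(const std::vector<Field>& fields) {
  std::vector<int> result_range;
  for (const Field& f : fields) {
    result_range.insert(result_range.end(), f.slots.begin(), f.slots.end());
  }
  return result_range;
}

int main() {
  // e.g. a struct { a: intptr; b: pair } lowered to 1 + 2 slots.
  std::vector<Field> s = {{"a", {7}}, {"b", {1, 2}}};
  std::printf("lowered slot count: %zu\n", FetchStruct(s).size());
}
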
@@ -2194,7 +2211,12 @@ void ImplementationVisitor::GenerateAssignToLocation(
     ReportError("assigning a value directly to an indexed field isn't allowed");
   } else if (reference.IsHeapReference()) {
     const Type* referenced_type = reference.ReferencedType();
-    if (auto* struct_type = StructType::DynamicCast(referenced_type)) {
+    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
+      GenerateCall(
+          QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
+                        "StoreFloat64OrHole"),
+          Arguments{{reference.heap_reference(), assignment_value}, {}});
+    } else if (auto* struct_type = StructType::DynamicCast(referenced_type)) {
       if (assignment_value.type() != referenced_type) {
         ReportError("Cannot assign to ", *referenced_type,
                     " with value of type ", *assignment_value.type());
@@ -3757,6 +3779,7 @@ void GenerateClassFieldVerifier(const std::string& class_name,
   if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
       !field_type->IsStructType())
     return;
+  if (field_type == TypeOracle::GetFloat64OrHoleType()) return;
   // Do not verify if the field may be uninitialized.
   if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
@@ -249,6 +249,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
     return Get().GetBuiltinType(FLOAT64_TYPE_STRING);
   }

+  static const Type* GetFloat64OrHoleType() {
+    return Get().GetBuiltinType(FLOAT64_OR_HOLE_TYPE_STRING);
+  }
+
   static const Type* GetConstFloat64Type() {
     return Get().GetBuiltinType(CONST_FLOAT64_TYPE_STRING);
   }
@@ -417,7 +417,8 @@ void TypeVisitor::VisitClassFieldsAndMethods(
         ReportError("non-extern classes do not support weak fields");
       }
     }
-    if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
+    const StructType* struct_type = StructType::DynamicCast(field_type);
+    if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
       for (const Field& struct_field : struct_type->fields()) {
         if (!struct_field.name_and_type.type->IsSubtypeOf(
                 TypeOracle::GetTaggedType())) {
@@ -768,6 +768,9 @@ size_t AbstractType::AlignmentLog2() const {
 }

 size_t StructType::AlignmentLog2() const {
+  if (this == TypeOracle::GetFloat64OrHoleType()) {
+    return TypeOracle::GetFloat64Type()->AlignmentLog2();
+  }
   size_t alignment_log_2 = 0;
   for (const Field& field : fields()) {
     alignment_log_2 =
@@ -778,7 +781,8 @@ size_t StructType::AlignmentLog2() const {

 void Field::ValidateAlignment(ResidueClass at_offset) const {
   const Type* type = name_and_type.type;
-  if (const StructType* struct_type = StructType::DynamicCast(type)) {
+  const StructType* struct_type = StructType::DynamicCast(type);
+  if (struct_type && struct_type != TypeOracle::GetFloat64OrHoleType()) {
     for (const Field& field : struct_type->fields()) {
       field.ValidateAlignment(at_offset);
       size_t field_size = std::get<0>(field.GetFieldSizeInformation());
@@ -834,8 +838,13 @@ base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type) {
     size = TargetArchitecture::RawPtrSize();
     size_string = "kIntptrSize";
   } else if (const StructType* struct_type = StructType::DynamicCast(type)) {
-    size = struct_type->PackedSize();
-    size_string = std::to_string(size);
+    if (type == TypeOracle::GetFloat64OrHoleType()) {
+      size = kDoubleSize;
+      size_string = "kDoubleSize";
+    } else {
+      size = struct_type->PackedSize();
+      size_string = std::to_string(size);
+    }
   } else {
     return {};
   }
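
This special case exists because a literal struct of {bool, double} would pack and pad to more than eight bytes, while float64_or_hole occupies exactly one IEEE double whose hole state lives in the NaN payload. A minimal standalone check of that size difference (illustrative, not V8 code):

#include <cstdio>

struct NaiveFloat64OrHole {
  bool is_hole;
  double value;
};

int main() {
  // Typically prints 16 vs 8: the padded struct is twice the wire size.
  std::printf("naive struct: %zu bytes, in-memory representation: %zu bytes\n",
              sizeof(NaiveFloat64OrHole), sizeof(double));
}
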
@@ -10,6 +10,7 @@
 #include "src/base/bits.h"
 #include "src/base/logging.h"
 #include "src/torque/ast.h"
+#include "src/torque/constants.h"
 #include "src/torque/declarable.h"
 #include "src/torque/utils.h"
@@ -192,9 +193,17 @@ bool IsKeywordLikeName(const std::string& s) {
 // naming convention and are those exempt from the normal type convention.
 bool IsMachineType(const std::string& s) {
   static const char* const machine_types[]{
-      "void",    "never",  "int8",   "uint8",  "int16",   "uint16",  "int31",
-      "uint31",  "int32",  "uint32", "int64",  "intptr",  "uintptr", "float32",
-      "float64", "bool",   "string", "bint",   "char8",   "char16"};
+      VOID_TYPE_STRING,    NEVER_TYPE_STRING,
+      INT8_TYPE_STRING,    UINT8_TYPE_STRING,
+      INT16_TYPE_STRING,   UINT16_TYPE_STRING,
+      INT31_TYPE_STRING,   UINT31_TYPE_STRING,
+      INT32_TYPE_STRING,   UINT32_TYPE_STRING,
+      INT64_TYPE_STRING,   INTPTR_TYPE_STRING,
+      UINTPTR_TYPE_STRING, FLOAT32_TYPE_STRING,
+      FLOAT64_TYPE_STRING, FLOAT64_OR_HOLE_TYPE_STRING,
+      BOOL_TYPE_STRING,    "string",
+      BINT_TYPE_STRING,    CHAR8_TYPE_STRING,
+      CHAR16_TYPE_STRING};

   return std::find(std::begin(machine_types), std::end(machine_types), s) !=
          std::end(machine_types);
@@ -71,6 +71,11 @@ type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
 type Context extends HeapObject generates 'TNode<Context>';
 type NativeContext extends Context;

+struct float64_or_hole {
+  is_hole: bool;
+  value: float64;
+}
+
 intrinsic %FromConstexpr<To: type, From: type>(b: From): To;
 intrinsic %RawDownCast<To: type, From: type>(x: From): To;
 intrinsic %RawConstexprCast<To: type, From: type>(f: From): To;
third_party/v8/builtins/array-sort.tq (vendored, 4 changes)
@@ -259,7 +259,7 @@ namespace array {
   try {
     const object = UnsafeCast<JSObject>(sortState.receiver);
     const elements = UnsafeCast<FixedDoubleArray>(object.elements);
-    const value = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
+    const value = elements.floats[index].Value() otherwise IfHole;
     return AllocateHeapNumberWithValue(value);
   }
   label IfHole {
@@ -333,7 +333,7 @@ namespace array {

   const object = UnsafeCast<JSObject>(sortState.receiver);
   const elements = UnsafeCast<FixedDoubleArray>(object.elements);
-  StoreFixedDoubleArrayHoleSmi(elements, index);
+  elements.floats[index] = kDoubleHole;
   return kSuccess;
 }